Merge tag 'fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty...
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 24 Feb 2015 17:14:43 +0000 (09:14 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 24 Feb 2015 17:14:43 +0000 (09:14 -0800)
Pull lguest fixes from Rusty Russell:
 "Lguest weird config build fix, and update to the documentation"

* tag 'fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux:
  lguest: update help text.
  lguest: now depends on PCI

779 files changed:
Documentation/ABI/testing/sysfs-driver-samsung-laptop
Documentation/ABI/testing/sysfs-driver-toshiba_acpi [new file with mode: 0644]
Documentation/DocBook/kgdb.tmpl
Documentation/clk.txt
Documentation/device-mapper/dm-crypt.txt
Documentation/devicetree/bindings/clock/exynos7-clock.txt
Documentation/devicetree/bindings/clock/nvidia,tegra124-car.txt
Documentation/devicetree/bindings/clock/qcom,lcc.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/qoriq-clock.txt
Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/renesas,rcar-gen2-cpg-clocks.txt
Documentation/devicetree/bindings/clock/sunxi.txt
Documentation/devicetree/bindings/clock/ti,cdce706.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/ti/fapll.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt
Documentation/devicetree/bindings/i2c/i2c-ocores.txt
Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
Documentation/devicetree/bindings/i2c/trivial-devices.txt
Documentation/devicetree/bindings/mips/cavium/cib.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mmc/sunxi-mmc.txt
Documentation/devicetree/bindings/thermal/exynos-thermal.txt
Documentation/devicetree/bindings/thermal/thermal.txt
Documentation/filesystems/Locking
Documentation/filesystems/overlayfs.txt
Documentation/i2c/functionality
Documentation/input/alps.txt
Documentation/kbuild/makefiles.txt
Documentation/x86/zero-page.txt
Kbuild
MAINTAINERS
Makefile
arch/arm/boot/dts/bcm-cygnus.dtsi
arch/arm/boot/dts/bcm63138.dtsi
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i-a10s.dtsi
arch/arm/boot/dts/sun5i-a13.dtsi
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/sun8i-a23.dtsi
arch/arm/kernel/perf_event_cpu.c
arch/arm/mach-at91/Kconfig
arch/arm/mach-at91/at91rm9200_time.c
arch/arm/mach-at91/generic.h
arch/arm/mach-at91/pm.c
arch/arm/mach-axxia/axxia.c
arch/arm/mach-bcm/Kconfig
arch/arm/mach-bcm/brcmstb.c
arch/arm/mach-davinci/Kconfig
arch/arm/mach-davinci/da8xx-dt.c
arch/arm/mach-davinci/mux.c
arch/arm/mach-exynos/exynos.c
arch/arm/mach-exynos/suspend.c
arch/arm/mach-highbank/highbank.c
arch/arm/mach-hisi/hisilicon.c
arch/arm/mach-imx/mmdc.c
arch/arm/mach-ixp4xx/include/mach/io.h
arch/arm/mach-keystone/keystone.c
arch/arm/mach-keystone/pm_domain.c
arch/arm/mach-mmp/time.c
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-mvebu/pmsu.c
arch/arm/mach-mvebu/system-controller.c
arch/arm/mach-nspire/nspire.c
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/cclock3xxx_data.c [deleted file]
arch/arm/mach-omap2/clock.c
arch/arm/mach-omap2/clock.h
arch/arm/mach-omap2/clock_common_data.c
arch/arm/mach-omap2/dpll3xxx.c
arch/arm/mach-omap2/dpll44xx.c
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/omap4-common.c
arch/arm/mach-omap2/prm.h
arch/arm/mach-omap2/prm3xxx.c
arch/arm/mach-omap2/prm44xx.c
arch/arm/mach-omap2/prm_common.c
arch/arm/mach-prima2/Kconfig
arch/arm/mach-prima2/common.c
arch/arm/mach-prima2/platsmp.c
arch/arm/mach-rockchip/Kconfig
arch/arm/mach-rockchip/pm.h
arch/arm/mach-s5pv210/s5pv210.c
arch/arm/mach-shmobile/setup-emev2.c
arch/arm/mach-sti/Kconfig
arch/arm/mach-tegra/tegra.c
arch/arm/mach-ux500/pm_domains.c
arch/arm/mach-versatile/versatile_dt.c
arch/arm/mach-vexpress/Kconfig
arch/arm/mm/Kconfig
arch/arm/mm/dma-mapping.c
arch/blackfin/include/asm/bfin_rotary.h [deleted file]
arch/blackfin/mach-bf527/boards/ad7160eval.c
arch/blackfin/mach-bf527/boards/ezkit.c
arch/blackfin/mach-bf548/boards/ezkit.c
arch/blackfin/mach-bf609/boards/ezkit.c
arch/mips/Kconfig
arch/mips/Kconfig.debug
arch/mips/Makefile
arch/mips/alchemy/common/clock.c
arch/mips/alchemy/common/setup.c
arch/mips/bcm3384/irq.c
arch/mips/boot/Makefile
arch/mips/boot/elf2ecoff.c
arch/mips/cavium-octeon/csrc-octeon.c
arch/mips/cavium-octeon/dma-octeon.c
arch/mips/cavium-octeon/executive/cvmx-helper-board.c
arch/mips/cavium-octeon/octeon-irq.c
arch/mips/cavium-octeon/setup.c
arch/mips/configs/malta_qemu_32r6_defconfig [new file with mode: 0644]
arch/mips/fw/arc/misc.c
arch/mips/include/asm/Kbuild
arch/mips/include/asm/asmmacro.h
arch/mips/include/asm/atomic.h
arch/mips/include/asm/bitops.h
arch/mips/include/asm/checksum.h
arch/mips/include/asm/cmpxchg.h
arch/mips/include/asm/compiler.h
arch/mips/include/asm/cpu-features.h
arch/mips/include/asm/cpu-info.h
arch/mips/include/asm/cpu-type.h
arch/mips/include/asm/cpu.h
arch/mips/include/asm/edac.h
arch/mips/include/asm/elf.h
arch/mips/include/asm/fpu.h
arch/mips/include/asm/futex.h
arch/mips/include/asm/gio_device.h
arch/mips/include/asm/hazards.h
arch/mips/include/asm/irqflags.h
arch/mips/include/asm/local.h
arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
arch/mips/include/asm/mach-cavium-octeon/war.h
arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
arch/mips/include/asm/mips-r2-to-r6-emul.h [new file with mode: 0644]
arch/mips/include/asm/mipsregs.h
arch/mips/include/asm/mmu.h
arch/mips/include/asm/mmu_context.h
arch/mips/include/asm/module.h
arch/mips/include/asm/octeon/cvmx-cmd-queue.h
arch/mips/include/asm/octeon/cvmx-rst-defs.h [new file with mode: 0644]
arch/mips/include/asm/octeon/octeon-model.h
arch/mips/include/asm/octeon/octeon.h
arch/mips/include/asm/pci.h
arch/mips/include/asm/pgtable-bits.h
arch/mips/include/asm/pgtable.h
arch/mips/include/asm/processor.h
arch/mips/include/asm/prom.h
arch/mips/include/asm/ptrace.h
arch/mips/include/asm/r4kcache.h
arch/mips/include/asm/sgialib.h
arch/mips/include/asm/siginfo.h [deleted file]
arch/mips/include/asm/spinlock.h
arch/mips/include/asm/spram.h
arch/mips/include/asm/stackframe.h
arch/mips/include/asm/switch_to.h
arch/mips/include/asm/thread_info.h
arch/mips/include/uapi/asm/inst.h
arch/mips/include/uapi/asm/siginfo.h
arch/mips/kernel/Makefile
arch/mips/kernel/asm-offsets.c
arch/mips/kernel/branch.c
arch/mips/kernel/cevt-r4k.c
arch/mips/kernel/cps-vec.S
arch/mips/kernel/cpu-bugs64.c
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/elf.c
arch/mips/kernel/entry.S
arch/mips/kernel/genex.S
arch/mips/kernel/idle.c
arch/mips/kernel/mips-r2-to-r6-emul.c [new file with mode: 0644]
arch/mips/kernel/mips_ksyms.c
arch/mips/kernel/octeon_switch.S
arch/mips/kernel/proc.c
arch/mips/kernel/process.c
arch/mips/kernel/r4k_fpu.S
arch/mips/kernel/r4k_switch.S
arch/mips/kernel/spram.c
arch/mips/kernel/syscall.c
arch/mips/kernel/traps.c
arch/mips/kernel/unaligned.c
arch/mips/lib/Makefile
arch/mips/lib/memcpy.S
arch/mips/lib/memset.S
arch/mips/lib/mips-atomic.c
arch/mips/math-emu/cp1emu.c
arch/mips/mm/c-r4k.c
arch/mips/mm/fault.c
arch/mips/mm/page.c
arch/mips/mm/sc-mips.c
arch/mips/mm/tlb-r4k.c
arch/mips/mm/tlbex.c
arch/mips/mm/uasm-micromips.c
arch/mips/mm/uasm-mips.c
arch/mips/mm/uasm.c
arch/mips/mti-sead3/sead3-time.c
arch/mips/pci/pci-bcm1480.c
arch/mips/pci/pci-octeon.c
arch/mips/pci/pcie-octeon.c
arch/mips/pmcs-msp71xx/Kconfig
arch/mips/sgi-ip22/ip22-gio.c
arch/mips/sgi-ip27/ip27-reset.c
arch/mips/sgi-ip32/ip32-reset.c
arch/parisc/Makefile
arch/powerpc/Makefile
arch/powerpc/configs/corenet32_smp_defconfig
arch/powerpc/configs/corenet64_smp_defconfig
arch/powerpc/kernel/time.c
arch/powerpc/platforms/512x/clock-commonclk.c
arch/s390/hypfs/inode.c
arch/s390/include/asm/topology.h
arch/s390/kernel/cache.c
arch/s390/kernel/early.c
arch/s390/kernel/setup.c
arch/s390/kernel/smp.c
arch/s390/kernel/topology.c
arch/s390/kernel/vdso64/clock_gettime.S
arch/s390/mm/mmap.c
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/Makefile.um
arch/x86/boot/compressed/Makefile
arch/x86/boot/compressed/aslr.c
arch/x86/boot/compressed/efi_stub_64.S
arch/x86/boot/compressed/efi_thunk_64.S [new file with mode: 0644]
arch/x86/boot/compressed/misc.c
arch/x86/boot/compressed/misc.h
arch/x86/include/asm/apic.h
arch/x86/include/asm/imr.h [new file with mode: 0644]
arch/x86/include/asm/page_types.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/spinlock.h
arch/x86/include/uapi/asm/bootparam.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/cpu/microcode/intel_early.c
arch/x86/kernel/irq.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/kvm.c
arch/x86/kernel/module.c
arch/x86/kernel/setup.c
arch/x86/kernel/uprobes.c
arch/x86/mm/init.c
arch/x86/mm/mmap.c
arch/x86/platform/Makefile
arch/x86/platform/efi/efi_stub_64.S
arch/x86/platform/efi/efi_thunk_64.S
arch/x86/platform/intel-quark/Makefile [new file with mode: 0644]
arch/x86/platform/intel-quark/imr.c [new file with mode: 0644]
arch/x86/platform/intel-quark/imr_selftest.c [new file with mode: 0644]
arch/x86/xen/spinlock.c
block/blk-throttle.c
drivers/acpi/Makefile
drivers/acpi/acpi_lpat.c [new file with mode: 0644]
drivers/acpi/acpi_lpss.c
drivers/acpi/ec.c
drivers/acpi/pmic/intel_pmic.c
drivers/acpi/resource.c
drivers/acpi/video.c
drivers/block/nvme-core.c
drivers/block/nvme-scsi.c
drivers/block/rbd.c
drivers/char/ipmi/ipmi_devintf.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_si_intf.c
drivers/char/ipmi/ipmi_ssif.c
drivers/clk/Kconfig
drivers/clk/Makefile
drivers/clk/at91/clk-programmable.c
drivers/clk/bcm/clk-kona.c
drivers/clk/clk-asm9260.c [new file with mode: 0644]
drivers/clk/clk-cdce706.c [new file with mode: 0644]
drivers/clk/clk-composite.c
drivers/clk/clk-divider.c
drivers/clk/clk-gate.c
drivers/clk/clk-mux.c
drivers/clk/clk-ppc-corenet.c [deleted file]
drivers/clk/clk-qoriq.c [new file with mode: 0644]
drivers/clk/clk.c
drivers/clk/clk.h
drivers/clk/clkdev.c
drivers/clk/hisilicon/clk-hi3620.c
drivers/clk/mmp/clk-mix.c
drivers/clk/pxa/Makefile
drivers/clk/pxa/clk-pxa.c
drivers/clk/pxa/clk-pxa3xx.c [new file with mode: 0644]
drivers/clk/qcom/Kconfig
drivers/clk/qcom/Makefile
drivers/clk/qcom/clk-pll.c
drivers/clk/qcom/clk-rcg.c
drivers/clk/qcom/clk-rcg2.c
drivers/clk/qcom/clk-regmap-divider.c [new file with mode: 0644]
drivers/clk/qcom/clk-regmap-divider.h [new file with mode: 0644]
drivers/clk/qcom/clk-regmap-mux.c [new file with mode: 0644]
drivers/clk/qcom/clk-regmap-mux.h [new file with mode: 0644]
drivers/clk/qcom/gcc-ipq806x.c
drivers/clk/qcom/lcc-ipq806x.c [new file with mode: 0644]
drivers/clk/qcom/lcc-msm8960.c [new file with mode: 0644]
drivers/clk/rockchip/clk-rk3288.c
drivers/clk/samsung/clk-exynos-audss.c
drivers/clk/samsung/clk-exynos3250.c
drivers/clk/samsung/clk-exynos4.c
drivers/clk/samsung/clk-exynos4415.c
drivers/clk/samsung/clk-exynos7.c
drivers/clk/samsung/clk.c
drivers/clk/samsung/clk.h
drivers/clk/shmobile/Makefile
drivers/clk/shmobile/clk-div6.c
drivers/clk/shmobile/clk-r8a73a4.c [new file with mode: 0644]
drivers/clk/shmobile/clk-rcar-gen2.c
drivers/clk/st/clk-flexgen.c
drivers/clk/st/clkgen-mux.c
drivers/clk/sunxi/Makefile
drivers/clk/sunxi/clk-factors.c
drivers/clk/sunxi/clk-factors.h
drivers/clk/sunxi/clk-mod0.c
drivers/clk/sunxi/clk-sun6i-ar100.c
drivers/clk/sunxi/clk-sun8i-mbus.c
drivers/clk/sunxi/clk-sun9i-core.c
drivers/clk/sunxi/clk-sun9i-mmc.c [new file with mode: 0644]
drivers/clk/sunxi/clk-sunxi.c
drivers/clk/tegra/Makefile
drivers/clk/tegra/clk-id.h
drivers/clk/tegra/clk-periph.c
drivers/clk/tegra/clk-pll.c
drivers/clk/tegra/clk-tegra-periph.c
drivers/clk/tegra/clk-tegra114.c
drivers/clk/tegra/clk-tegra124.c
drivers/clk/tegra/clk.c
drivers/clk/ti/Makefile
drivers/clk/ti/clk-3xxx-legacy.c [new file with mode: 0644]
drivers/clk/ti/clk-3xxx.c
drivers/clk/ti/clk-44xx.c
drivers/clk/ti/clk-54xx.c
drivers/clk/ti/clk-7xx.c
drivers/clk/ti/clk-816x.c [new file with mode: 0644]
drivers/clk/ti/clk.c
drivers/clk/ti/clock.h [new file with mode: 0644]
drivers/clk/ti/composite.c
drivers/clk/ti/divider.c
drivers/clk/ti/dpll.c
drivers/clk/ti/fapll.c [new file with mode: 0644]
drivers/clk/ti/gate.c
drivers/clk/ti/interface.c
drivers/clk/ti/mux.c
drivers/clk/ux500/clk-prcc.c
drivers/clk/ux500/clk-prcmu.c
drivers/clk/zynq/clkc.c
drivers/connector/Kconfig
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Kconfig.powerpc
drivers/cpufreq/Makefile
drivers/cpufreq/exynos-cpufreq.c
drivers/cpufreq/s3c2416-cpufreq.c
drivers/cpufreq/s3c24xx-cpufreq.c
drivers/cpuidle/cpuidle-powernv.c
drivers/edac/amd64_edac.c
drivers/edac/sb_edac.c
drivers/firmware/efi/libstub/efi-stub-helper.c
drivers/hid/hid-core.c
drivers/hid/hid-ids.h
drivers/hid/hid-microsoft.c
drivers/hid/hid-saitek.c
drivers/hid/hid-sensor-hub.c
drivers/hid/hid-sony.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/wacom_wac.c
drivers/hwmon/Kconfig
drivers/hwmon/pmbus/Kconfig
drivers/i2c/Kconfig
drivers/i2c/busses/Kconfig
drivers/i2c/busses/Makefile
drivers/i2c/busses/i2c-bcm-iproc.c [new file with mode: 0644]
drivers/i2c/busses/i2c-cadence.c
drivers/i2c/busses/i2c-designware-baytrail.c [new file with mode: 0644]
drivers/i2c/busses/i2c-designware-core.c
drivers/i2c/busses/i2c-designware-core.h
drivers/i2c/busses/i2c-designware-pcidrv.c
drivers/i2c/busses/i2c-designware-platdrv.c
drivers/i2c/busses/i2c-imx.c
drivers/i2c/busses/i2c-ocores.c
drivers/i2c/busses/i2c-pmcmsp.c
drivers/i2c/busses/i2c-rk3x.c
drivers/i2c/busses/i2c-tegra.c
drivers/i2c/i2c-core.c
drivers/i2c/muxes/i2c-mux-pca954x.c
drivers/iio/Kconfig
drivers/infiniband/core/ucma.c
drivers/infiniband/core/umem_odp.c
drivers/infiniband/core/uverbs.h
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/ipath/ipath_fs.c
drivers/infiniband/hw/ipath/ipath_kernel.h
drivers/infiniband/hw/ipath/ipath_wc_ppc64.c
drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
drivers/infiniband/hw/mlx4/cm.c
drivers/infiniband/hw/mlx4/cq.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/ocrdma/ocrdma.h
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
drivers/infiniband/hw/ocrdma/ocrdma_ah.h
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.h
drivers/infiniband/hw/ocrdma/ocrdma_main.c
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
drivers/infiniband/hw/ocrdma/ocrdma_stats.c
drivers/infiniband/hw/ocrdma/ocrdma_stats.h
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/qib/qib.h
drivers/infiniband/hw/qib/qib_common.h
drivers/infiniband/hw/qib/qib_debugfs.c
drivers/infiniband/hw/qib/qib_diag.c
drivers/infiniband/hw/qib/qib_driver.c
drivers/infiniband/hw/qib/qib_eeprom.c
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/hw/qib/qib_fs.c
drivers/infiniband/hw/qib/qib_iba6120.c
drivers/infiniband/hw/qib/qib_iba7220.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/qib/qib_init.c
drivers/infiniband/hw/qib/qib_intr.c
drivers/infiniband/hw/qib/qib_keys.c
drivers/infiniband/hw/qib/qib_mad.c
drivers/infiniband/hw/qib/qib_mmap.c
drivers/infiniband/hw/qib/qib_mr.c
drivers/infiniband/hw/qib/qib_pcie.c
drivers/infiniband/hw/qib/qib_qp.c
drivers/infiniband/hw/qib/qib_qsfp.c
drivers/infiniband/hw/qib/qib_rc.c
drivers/infiniband/hw/qib/qib_ruc.c
drivers/infiniband/hw/qib/qib_sd7220.c
drivers/infiniband/hw/qib/qib_sysfs.c
drivers/infiniband/hw/qib/qib_twsi.c
drivers/infiniband/hw/qib/qib_tx.c
drivers/infiniband/hw/qib/qib_ud.c
drivers/infiniband/hw/qib/qib_user_sdma.c
drivers/infiniband/hw/qib/qib_verbs.c
drivers/infiniband/hw/qib/qib_verbs_mcast.c
drivers/infiniband/hw/qib/qib_wc_x86_64.c
drivers/infiniband/ulp/iser/iscsi_iser.h
drivers/infiniband/ulp/iser/iser_initiator.c
drivers/infiniband/ulp/iser/iser_memory.c
drivers/infiniband/ulp/iser/iser_verbs.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/input/joystick/adi.c
drivers/input/keyboard/pxa27x_keypad.c
drivers/input/misc/bfin_rotary.c
drivers/input/misc/soc_button_array.c
drivers/input/mouse/alps.c
drivers/input/mouse/alps.h
drivers/input/mouse/cypress_ps2.c
drivers/input/mouse/cypress_ps2.h
drivers/input/mouse/focaltech.c
drivers/input/mouse/focaltech.h
drivers/input/mouse/psmouse-base.c
drivers/input/mouse/synaptics.c
drivers/input/mouse/synaptics.h
drivers/irqchip/irq-mips-gic.c
drivers/isdn/hardware/mISDN/Kconfig
drivers/md/Kconfig
drivers/md/dm-crypt.c
drivers/md/dm-io.c
drivers/md/dm-raid1.c
drivers/md/dm-snap.c
drivers/md/dm.c
drivers/md/persistent-data/Kconfig
drivers/md/persistent-data/dm-space-map-disk.c
drivers/mmc/host/sunxi-mmc.c
drivers/net/ethernet/ti/Kconfig
drivers/net/usb/Kconfig
drivers/net/wireless/rt2x00/Kconfig
drivers/pci/pcie/aer/Kconfig
drivers/platform/x86/Kconfig
drivers/platform/x86/asus-laptop.c
drivers/platform/x86/classmate-laptop.c
drivers/platform/x86/fujitsu-laptop.c
drivers/platform/x86/intel_scu_ipc.c
drivers/platform/x86/samsung-laptop.c
drivers/platform/x86/sony-laptop.c
drivers/platform/x86/thinkpad_acpi.c
drivers/platform/x86/toshiba_acpi.c
drivers/pnp/resource.c
drivers/rtc/Kconfig
drivers/scsi/am53c974.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/hpsa.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/sg.c
drivers/scsi/wd719x.c
drivers/spi/Kconfig
drivers/staging/board/Kconfig
drivers/staging/emxx_udc/Kconfig
drivers/staging/iio/Kconfig
drivers/staging/lustre/lustre/llite/dcache.c
drivers/staging/lustre/lustre/llite/file.c
drivers/staging/lustre/lustre/llite/llite_internal.h
drivers/staging/lustre/lustre/llite/namei.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_core.h [deleted file]
drivers/target/iscsi/iscsi_target_datain_values.c
drivers/target/iscsi/iscsi_target_device.c
drivers/target/iscsi/iscsi_target_erl0.c
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_erl2.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/iscsi/iscsi_target_nodeattrib.c
drivers/target/iscsi/iscsi_target_parameters.c
drivers/target/iscsi/iscsi_target_seq_pdu_list.c
drivers/target/iscsi/iscsi_target_stat.c
drivers/target/iscsi/iscsi_target_stat.h [deleted file]
drivers/target/iscsi/iscsi_target_tmr.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/iscsi/iscsi_target_tq.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/iscsi/iscsi_target_util.h
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_pr.c
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/thermal/int340x_thermal/Makefile
drivers/thermal/int340x_thermal/int3400_thermal.c
drivers/thermal/int340x_thermal/int3402_thermal.c
drivers/thermal/int340x_thermal/int3403_thermal.c
drivers/thermal/int340x_thermal/int340x_thermal_zone.c [new file with mode: 0644]
drivers/thermal/int340x_thermal/int340x_thermal_zone.h [new file with mode: 0644]
drivers/thermal/int340x_thermal/processor_thermal_device.c
drivers/thermal/intel_soc_dts_thermal.c
drivers/thermal/of-thermal.c
drivers/thermal/rockchip_thermal.c
drivers/thermal/samsung/Kconfig
drivers/thermal/samsung/Makefile
drivers/thermal/samsung/exynos_thermal_common.c [deleted file]
drivers/thermal/samsung/exynos_thermal_common.h [deleted file]
drivers/thermal/samsung/exynos_tmu.c
drivers/thermal/samsung/exynos_tmu.h
drivers/thermal/samsung/exynos_tmu_data.c [deleted file]
drivers/thermal/step_wise.c
drivers/tty/serial/Kconfig
drivers/usb/gadget/Kconfig
drivers/usb/gadget/legacy/Kconfig
drivers/usb/gadget/udc/Kconfig
drivers/usb/phy/Kconfig
drivers/vfio/pci/vfio_pci.c
drivers/vfio/pci/vfio_pci_intrs.c
drivers/vfio/pci/vfio_pci_private.h
drivers/vfio/vfio.c
drivers/vfio/vfio_iommu_type1.c
drivers/vhost/scsi.c
fs/9p/vfs_inode.c
fs/aio.c
fs/autofs4/dev-ioctl.c
fs/autofs4/expire.c
fs/autofs4/root.c
fs/bad_inode.c
fs/binfmt_elf.c
fs/btrfs/backref.c
fs/btrfs/backref.h
fs/btrfs/btrfs_inode.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/free-space-cache.c
fs/btrfs/inode-item.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/raid56.c
fs/btrfs/raid56.h
fs/btrfs/reada.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/send.c
fs/btrfs/super.c
fs/btrfs/sysfs.c
fs/btrfs/tests/extent-buffer-tests.c
fs/btrfs/tests/extent-io-tests.c
fs/btrfs/tests/inode-tests.c
fs/btrfs/tests/qgroup-tests.c
fs/btrfs/transaction.c
fs/btrfs/transaction.h
fs/btrfs/tree-log.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/cachefiles/daemon.c
fs/cachefiles/interface.c
fs/cachefiles/namei.c
fs/cachefiles/rdwr.c
fs/ceph/acl.c
fs/ceph/addr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/snap.c
fs/ceph/super.c
fs/ceph/super.h
fs/coda/dir.c
fs/configfs/configfs_internal.h
fs/configfs/dir.c
fs/configfs/file.c
fs/configfs/inode.c
fs/coredump.c
fs/dcache.c
fs/debugfs/inode.c
fs/ecryptfs/file.c
fs/ecryptfs/inode.c
fs/exportfs/expfs.c
fs/ext4/ext4.h
fs/ext4/indirect.c
fs/ext4/inode.c
fs/ext4/super.c
fs/fs-writeback.c
fs/fuse/dir.c
fs/gfs2/dir.c
fs/hfsplus/dir.c
fs/hppfs/hppfs.c
fs/internal.h
fs/jbd2/recovery.c
fs/jffs2/dir.c
fs/jffs2/super.c
fs/libfs.c
fs/namei.c
fs/namespace.c
fs/nfs/callback_proc.c
fs/nfs/callback_xdr.c
fs/nfs/delegation.c
fs/nfs/direct.c
fs/nfs/filelayout/filelayout.c
fs/nfs/flexfilelayout/flexfilelayout.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs4proc.c
fs/nfs/nfs4session.c
fs/nfs/nfs4session.h
fs/nfs/nfs4xdr.c
fs/nfs/pnfs.h
fs/nfs/pnfs_nfs.c
fs/nfs/write.c
fs/nfsd/nfs4recover.c
fs/nfsd/nfsfh.c
fs/nfsd/vfs.c
fs/notify/fanotify/fanotify.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/overlayfs.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/posix_acl.c
fs/proc/generic.c
fs/proc/inode.c
fs/proc/internal.h
fs/reiserfs/xattr.c
fs/super.c
fs/xfs/Makefile
fs/xfs/xfs_export.c
fs/xfs/xfs_file.c
fs/xfs/xfs_fsops.c
fs/xfs/xfs_ioctl.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_iops.h
fs/xfs/xfs_mount.h
fs/xfs/xfs_pnfs.c [new file with mode: 0644]
fs/xfs/xfs_pnfs.h [new file with mode: 0644]
include/acpi/acpi_lpat.h [new file with mode: 0644]
include/dt-bindings/clock/alphascale,asm9260.h [new file with mode: 0644]
include/dt-bindings/clock/exynos4.h
include/dt-bindings/clock/exynos7-clk.h
include/dt-bindings/clock/qcom,gcc-ipq806x.h
include/dt-bindings/clock/qcom,lcc-ipq806x.h [new file with mode: 0644]
include/dt-bindings/clock/qcom,lcc-msm8960.h [new file with mode: 0644]
include/dt-bindings/clock/tegra124-car-common.h [new file with mode: 0644]
include/dt-bindings/clock/tegra124-car.h
include/dt-bindings/thermal/thermal_exynos.h [new file with mode: 0644]
include/linux/ceph/ceph_fs.h
include/linux/ceph/libceph.h
include/linux/ceph/messenger.h
include/linux/ceph/mon_client.h
include/linux/clk-private.h [deleted file]
include/linux/clk-provider.h
include/linux/clk.h
include/linux/clk/sunxi.h [deleted file]
include/linux/clk/tegra.h
include/linux/clk/ti.h
include/linux/compiler.h
include/linux/dcache.h
include/linux/hid-sensor-hub.h
include/linux/i2c.h
include/linux/irqchip/mips-gic.h
include/linux/kdb.h
include/linux/nfs_fs.h
include/linux/nfs_xdr.h
include/linux/nvme.h
include/linux/platform_data/bfin_rotary.h [new file with mode: 0644]
include/linux/sched.h
include/linux/sunrpc/metrics.h
include/linux/vfio.h
include/target/iscsi/iscsi_target_core.h [new file with mode: 0644]
include/target/iscsi/iscsi_target_stat.h [new file with mode: 0644]
include/target/iscsi/iscsi_transport.h
include/target/target_core_base.h
include/uapi/linux/btrfs.h
include/uapi/linux/nvme.h
include/uapi/linux/prctl.h
include/uapi/linux/vfio.h
include/uapi/rdma/ib_user_verbs.h
init/Kconfig
kernel/debug/debug_core.c
kernel/debug/kdb/kdb_io.c
kernel/debug/kdb/kdb_main.c
kernel/debug/kdb/kdb_private.h
kernel/gcov/Makefile
kernel/livepatch/core.c
kernel/locking/rtmutex.c
kernel/printk/printk.c
kernel/rcu/tree_plugin.h
kernel/sched/auto_group.c
kernel/sched/completion.c
kernel/sched/core.c
kernel/sched/deadline.c
kernel/sched/sched.h
kernel/sys.c
kernel/time/ntp.c
lib/Kconfig
mm/Kconfig
mm/shmem.c
net/Kconfig
net/ceph/ceph_common.c
net/ceph/ceph_strings.c
net/ceph/debugfs.c
net/ceph/messenger.c
net/ceph/mon_client.c
net/ceph/osd_client.c
net/sched/Kconfig
net/sunrpc/backchannel_rqst.c
net/switchdev/Kconfig
scripts/Kbuild.include
scripts/Makefile.clean
scripts/kconfig/confdata.c
scripts/kconfig/merge_config.sh
scripts/package/builddeb
security/apparmor/include/apparmor.h
security/apparmor/lsm.c
security/apparmor/path.c
security/inode.c
security/integrity/Kconfig
security/integrity/evm/Kconfig
security/selinux/hooks.c
security/smack/smack_lsm.c
security/tomoyo/file.c
sound/core/seq/seq_midi_emul.c
sound/pci/hda/hda_tegra.c
sound/pci/hda/patch_realtek.c
sound/pci/rme9652/hdspm.c
sound/soc/intel/sst-haswell-pcm.c
sound/usb/clock.c
sound/usb/line6/driver.c
sound/usb/line6/driver.h
sound/usb/quirks.c
sound/usb/quirks.h

index 678819a3f8bf89e3b43e134979af2fa3f28199fd..63c1ad0212fc8624432f1691240443b9015b5d3f 100644 (file)
@@ -35,3 +35,11 @@ Contact:     Corentin Chary <corentin.chary@gmail.com>
 Description:   Use your USB ports to charge devices, even
                when your laptop is powered off.
                1 means enabled, 0 means disabled.
+
+What:          /sys/devices/platform/samsung/lid_handling
+Date:          December 11, 2014
+KernelVersion: 3.19
+Contact:       Julijonas Kikutis <julijonas.kikutis@gmail.com>
+Description:   Some Samsung laptops handle lid closing quicker and
+               only handle lid opening with this mode enabled.
+               1 means enabled, 0 means disabled.
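The lid_handling attribute above follows the usual single-digit sysfs convention, so it can be toggled with a plain write. A minimal user-space sketch, assuming the attribute is present at the documented path and the caller has write permission (the helper name is illustrative, not part of the driver):

#include <stdio.h>

/* Illustrative helper: enable (1) or disable (0) Samsung lid handling. */
static int set_lid_handling(int enable)
{
	const char *path = "/sys/devices/platform/samsung/lid_handling";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	/* The attribute accepts "1" (enabled) or "0" (disabled). */
	fprintf(f, "%d\n", enable ? 1 : 0);
	fclose(f);
	return 0;
}

int main(void)
{
	return set_lid_handling(1) ? 1 : 0;
}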
diff --git a/Documentation/ABI/testing/sysfs-driver-toshiba_acpi b/Documentation/ABI/testing/sysfs-driver-toshiba_acpi
new file mode 100644 (file)
index 0000000..ca9c71a
--- /dev/null
@@ -0,0 +1,114 @@
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_backlight_mode
+Date:          June 8, 2014
+KernelVersion: 3.15
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls the keyboard backlight operation mode, valid
+               values are:
+                       * 0x1  -> FN-Z
+                       * 0x2  -> AUTO (also called TIMER)
+                       * 0x8  -> ON
+                       * 0x10 -> OFF
+               Note that from kernel 3.16 onwards this file accepts all
+               listed parameters; kernel 3.15 only accepts the first two
+               (FN-Z and AUTO).
+Users:         KToshiba
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_backlight_timeout
+Date:          June 8, 2014
+KernelVersion: 3.15
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls the timeout of the keyboard backlight
+               whenever the operation mode is set to AUTO (or TIMER),
+               valid values range from 0 to 60.
+               Note that kernel 3.15 only supported the first keyboard
+               type; kernel 3.16 added support for the second type, and
+               the range accepted for type 2 is 1-60.
+               See the entry named "kbd_type".
+Users:         KToshiba
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/position
+Date:          June 8, 2014
+KernelVersion: 3.15
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file shows the absolute position of the built-in
+               accelerometer.
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/touchpad
+Date:          June 8, 2014
+KernelVersion: 3.15
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls the status of the touchpad and pointing
+               stick (if available), valid values are:
+                       * 0 -> OFF
+                       * 1 -> ON
+Users:         KToshiba
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/available_kbd_modes
+Date:          August 3, 2014
+KernelVersion: 3.16
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file shows the keyboard backlight modes the system
+               supports, which can be:
+                       * 0x1  -> FN-Z
+                       * 0x2  -> AUTO (also called TIMER)
+                       * 0x8  -> ON
+                       * 0x10 -> OFF
+               Note that not all keyboard types support the listed modes.
+               See the entry named "kbd_type".
+Users:         KToshiba
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_type
+Date:          August 3, 2014
+KernelVersion: 3.16
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file shows the current keyboard backlight type,
+               which can be:
+                       * 1 -> Type 1, supporting modes FN-Z and AUTO
+                       * 2 -> Type 2, supporting modes TIMER, ON and OFF
+Users:         KToshiba
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/version
+Date:          February, 2015
+KernelVersion: 3.20
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file shows the current version of the driver
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/fan
+Date:          February, 2015
+KernelVersion: 3.20
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls the state of the internal fan, valid
+               values are:
+                       * 0 -> OFF
+                       * 1 -> ON
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/kbd_function_keys
+Date:          February, 2015
+KernelVersion: 3.20
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls the Special Functions (hotkeys) operation
+               mode, valid values are:
+                       * 0 -> Normal Operation
+                       * 1 -> Special Functions
+               In the "Normal Operation" mode, the F{1-12} keys are as usual
+               and the hotkeys are accessed via FN-F{1-12}.
+               In the "Special Functions" mode, the F{1-12} keys trigger
+               the hotkeys directly and the standard F{1-12} functions are
+               accessed via FN-F{1-12}.
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/panel_power_on
+Date:          February, 2015
+KernelVersion: 3.20
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls whether the laptop should turn on
+               whenever the lid is opened, valid values are:
+                       * 0 -> Disabled
+                       * 1 -> Enabled
+
+What:          /sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS{1900,620{0,7,8}}:00/usb_three
+Date:          February, 2015
+KernelVersion: 3.20
+Contact:       Azael Avalos <coproscefalo@gmail.com>
+Description:   This file controls the USB 3 functionality, valid
+               values are:
+                       * 0 -> Disabled (Acts as a regular USB 2)
+                       * 1 -> Enabled (Full USB 3 functionality)
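Several of the attributes above are numeric values whose meaning depends on kbd_type, so a user-space consumer normally reads kbd_type before deciding which kbd_backlight_mode bits it may write. A minimal read-side sketch, assuming a hypothetical TOS6200:00 device directory (the actual TOS* ACPI name varies per machine):

#include <stdio.h>

/* Hypothetical device directory; the TOS* ACPI name differs per machine. */
#define TOSHIBA_SYSFS "/sys/devices/LNXSYSTM:00/LNXSYBUS:00/TOS6200:00"

static int read_int_attr(const char *name, int *val)
{
	char path[256];
	FILE *f;
	int ret = 0;

	snprintf(path, sizeof(path), "%s/%s", TOSHIBA_SYSFS, name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%i", val) != 1)
		ret = -1;
	fclose(f);
	return ret;
}

int main(void)
{
	int type;

	if (read_int_attr("kbd_type", &type))
		return 1;
	/*
	 * Type 1 keyboards accept FN-Z (0x1) and AUTO (0x2) backlight modes;
	 * type 2 keyboards accept AUTO/TIMER (0x2), ON (0x8) and OFF (0x10).
	 */
	printf("keyboard backlight type: %d\n", type);
	return 0;
}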
index 2428cc04dbc84b5f7243558296f6f36fa9621fb4..f3abca7ec53d62e4bd416da9f53395aef9269855 100644 (file)
    may be configured as a kernel built-in or a kernel loadable module.
    You can only make use of <constant>kgdbwait</constant> and early
    debugging if you build kgdboc into the kernel as a built-in.
+   </para>
    <para>Optionally you can elect to activate kms (Kernel Mode
    Setting) integration.  When you use kms with kgdboc and you have a
    video driver that has atomic mode setting hooks, it is possible to
    crashes or doing analysis of memory with kdb while allowing the
    full graphics console applications to run.
    </para>
-   </para>
    <sect2 id="kgdbocArgs">
    <title>kgdboc arguments</title>
    <para>Usage: <constant>kgdboc=[kms][[,]kbd][[,]serial_device][,baud]</constant></para>
    </listitem>
    </orderedlist>
    </para>
-   </sect3>
    <para>NOTE: Kgdboc does not support interrupting the target via the
    gdb remote protocol.  You must manually send a sysrq-g unless you
    have a proxy that splits console output to a terminal program.
     as well as on the initial connect, or to use a debugger proxy that
     allows an unmodified gdb to do the debugging.
    </para>
+   </sect3>
    </sect2>
    </sect1>
    <sect1 id="kgdbwait">
    </para>
    </listitem>
    </orderedlist>
+  </para>
    <para>IMPORTANT NOTE: You cannot use kgdboc + kgdbcon on a tty that is an
    active system console.  An example of incorrect usage is <constant>console=ttyS0,115200 kgdboc=ttyS0 kgdbcon</constant>
    </para>
    <para>It is possible to use this option with kgdboc on a tty that is not a system console.
    </para>
-  </para>
   </sect1>
    <sect1 id="kgdbreboot">
    <title>Run time parameter: kgdbreboot</title>
index 4ff84623d5e16eb42c17f90eb1851eea19b9d82f..0e4f90aa1c136eaa40c9d93e3586bbffdd180de4 100644 (file)
@@ -73,6 +73,8 @@ the operations defined in clk.h:
                                                unsigned long *parent_rate);
                long            (*determine_rate)(struct clk_hw *hw,
                                                unsigned long rate,
+                                               unsigned long min_rate,
+                                               unsigned long max_rate,
                                                unsigned long *best_parent_rate,
                                                struct clk_hw **best_parent_clk);
                int             (*set_parent)(struct clk_hw *hw, u8 index);
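The hunk above only shows the widened .determine_rate prototype; a simplified sketch of how a clock driver could honour the new min_rate/max_rate bounds is given below. The foo_* names are illustrative, and a real implementation would also select the best parent and round to a rate the hardware supports:

#include <linux/clk-provider.h>

/*
 * Illustrative .determine_rate callback: clamp the requested rate into
 * [min_rate, max_rate] and keep the current parent unchanged.
 */
static long foo_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long min_rate,
				   unsigned long max_rate,
				   unsigned long *best_parent_rate,
				   struct clk_hw **best_parent_clk)
{
	if (rate < min_rate)
		rate = min_rate;
	if (rate > max_rate)
		rate = max_rate;

	/* *best_parent_rate and *best_parent_clk are left as passed in. */
	return rate;
}

static const struct clk_ops foo_clk_ops = {
	.determine_rate	= foo_clk_determine_rate,
};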
index c81839b52c4dd0102d6444c464359690f395778e..ad697781f9ac478477cfed76978b047685eda2b6 100644 (file)
@@ -51,7 +51,7 @@ Parameters: <cipher> <key> <iv_offset> <device path> \
     Otherwise #opt_params is the number of following arguments.
 
     Example of optional parameters section:
-        1 allow_discards
+        3 allow_discards same_cpu_crypt submit_from_crypt_cpus
 
 allow_discards
     Block discard requests (a.k.a. TRIM) are passed through the crypt device.
@@ -63,6 +63,19 @@ allow_discards
     used space etc.) if the discarded blocks can be located easily on the
     device later.
 
+same_cpu_crypt
+    Perform encryption using the same cpu that IO was submitted on.
+    The default is to use an unbound workqueue so that encryption work
+    is automatically balanced between available CPUs.
+
+submit_from_crypt_cpus
+    Disable offloading writes to a separate thread after encryption.
+    There are some situations where offloading write bios from the
+    encryption threads to a single thread degrades performance
+    significantly.  The default is to offload write bios to the same
+    thread because it benefits CFQ to have writes submitted using the
+    same context.
+
 Example scripts
 ===============
 LUKS (Linux Unified Key Setup) is now the preferred way to set up disk
index 6d3d5f80c1c3186ba35a7c28dd950e2f754e3027..6bf1e7493f61febb762a212d6306dcaa03fb00c4 100644 (file)
@@ -34,6 +34,8 @@ Required Properties for Clock Controller:
        - "samsung,exynos7-clock-peris"
        - "samsung,exynos7-clock-fsys0"
        - "samsung,exynos7-clock-fsys1"
+       - "samsung,exynos7-clock-mscl"
+       - "samsung,exynos7-clock-aud"
 
  - reg: physical base address of the controller and the length of
        memory mapped region.
@@ -53,6 +55,7 @@ Input clocks for top0 clock controller:
        - dout_sclk_bus1_pll
        - dout_sclk_cc_pll
        - dout_sclk_mfc_pll
+       - dout_sclk_aud_pll
 
 Input clocks for top1 clock controller:
        - fin_pll
@@ -76,6 +79,14 @@ Input clocks for peric1 clock controller:
        - sclk_uart1
        - sclk_uart2
        - sclk_uart3
+       - sclk_spi0
+       - sclk_spi1
+       - sclk_spi2
+       - sclk_spi3
+       - sclk_spi4
+       - sclk_i2s1
+       - sclk_pcm1
+       - sclk_spdif
 
 Input clocks for peris clock controller:
        - fin_pll
@@ -91,3 +102,7 @@ Input clocks for fsys1 clock controller:
        - dout_aclk_fsys1_200
        - dout_sclk_mmc0
        - dout_sclk_mmc1
+
+Input clocks for aud clock controller:
+       - fin_pll
+       - fout_aud_pll
index ded5d6212c84dfc0c6e9a5774b1f8e8ce975b0cb..c6620bc9670364315bcc8687ec81c016e7a89719 100644 (file)
@@ -1,4 +1,4 @@
-NVIDIA Tegra124 Clock And Reset Controller
+NVIDIA Tegra124 and Tegra132 Clock And Reset Controller
 
 This binding uses the common clock binding:
 Documentation/devicetree/bindings/clock/clock-bindings.txt
@@ -7,14 +7,16 @@ The CAR (Clock And Reset) Controller on Tegra is the HW module responsible
 for muxing and gating Tegra's clocks, and setting their rates.
 
 Required properties :
-- compatible : Should be "nvidia,tegra124-car"
+- compatible : Should be "nvidia,tegra124-car" or "nvidia,tegra132-car"
 - reg : Should contain CAR registers location and length
 - clocks : Should contain phandle and clock specifiers for two clocks:
   the 32 KHz "32k_in", and the board-specific oscillator "osc".
 - #clock-cells : Should be 1.
   In clock consumers, this cell represents the clock ID exposed by the
-  CAR. The assignments may be found in header file
-  <dt-bindings/clock/tegra124-car.h>.
+  CAR. The assignments may be found in the header files
+  <dt-bindings/clock/tegra124-car-common.h> (which covers IDs common
+  to Tegra124 and Tegra132) and <dt-bindings/clock/tegra124-car.h>
+  (for Tegra124-specific clocks).
 - #reset-cells : Should be 1.
   In clock consumers, this cell represents the bit number in the CAR's
   array of CLK_RST_CONTROLLER_RST_DEVICES_* registers.
diff --git a/Documentation/devicetree/bindings/clock/qcom,lcc.txt b/Documentation/devicetree/bindings/clock/qcom,lcc.txt
new file mode 100644 (file)
index 0000000..dd755be
--- /dev/null
@@ -0,0 +1,21 @@
+Qualcomm LPASS Clock & Reset Controller Binding
+------------------------------------------------
+
+Required properties :
+- compatible : shall contain only one of the following:
+
+                       "qcom,lcc-msm8960"
+                       "qcom,lcc-apq8064"
+                       "qcom,lcc-ipq8064"
+
+- reg : shall contain base register location and length
+- #clock-cells : shall contain 1
+- #reset-cells : shall contain 1
+
+Example:
+       clock-controller@28000000 {
+               compatible = "qcom,lcc-ipq8064";
+               reg = <0x28000000 0x1000>;
+               #clock-cells = <1>;
+               #reset-cells = <1>;
+       };
index 266ff9d232293a976de9e6a851e1bd7d1f2de7d0..df4a259a6898c5d7fe62fd5ad7e3ef323b5719b6 100644 (file)
@@ -1,6 +1,6 @@
-* Clock Block on Freescale CoreNet Platforms
+* Clock Block on Freescale QorIQ Platforms
 
-Freescale CoreNet chips take primary clocking input from the external
+Freescale QorIQ chips take primary clocking input from the external
 SYSCLK signal. The SYSCLK input (frequency) is multiplied using
 multiple phase locked loops (PLL) to create a variety of frequencies
 which can then be passed to a variety of internal logic, including
@@ -29,6 +29,7 @@ Required properties:
        * "fsl,t4240-clockgen"
        * "fsl,b4420-clockgen"
        * "fsl,b4860-clockgen"
+       * "fsl,ls1021a-clockgen"
        Chassis clock strings include:
        * "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks
        * "fsl,qoriq-clockgen-2.0": for chassis 2.0 clocks
index 2e18676bd4b56503ce42ec4b4d368c32041fe1d0..0a80fa70ca265c0f2c7b7657d7270f47d6f53a45 100644 (file)
@@ -11,6 +11,7 @@ Required Properties:
 
   - compatible: Must be one of the following
     - "renesas,r7s72100-mstp-clocks" for R7S72100 (RZ) MSTP gate clocks
+    - "renesas,r8a73a4-mstp-clocks" for R8A73A4 (R-Mobile APE6) MSTP gate clocks
     - "renesas,r8a7740-mstp-clocks" for R8A7740 (R-Mobile A1) MSTP gate clocks
     - "renesas,r8a7779-mstp-clocks" for R8A7779 (R-Car H1) MSTP gate clocks
     - "renesas,r8a7790-mstp-clocks" for R8A7790 (R-Car H2) MSTP gate clocks
diff --git a/Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,r8a73a4-cpg-clocks.txt
new file mode 100644 (file)
index 0000000..ece9239
--- /dev/null
@@ -0,0 +1,33 @@
+* Renesas R8A73A4 Clock Pulse Generator (CPG)
+
+The CPG generates core clocks for the R8A73A4 SoC. It includes five PLLs
+and several fixed ratio dividers.
+
+Required Properties:
+
+  - compatible: Must be "renesas,r8a73a4-cpg-clocks"
+
+  - reg: Base address and length of the memory resource used by the CPG
+
+  - clocks: Reference to the parent clocks ("extal1" and "extal2")
+
+  - #clock-cells: Must be 1
+
+  - clock-output-names: The names of the clocks. Supported clocks are "main",
+    "pll0", "pll1", "pll2", "pll2s", "pll2h", "z", "z2", "i", "m3", "b",
+    "m1", "m2", "zx", "zs", and "hp".
+
+
+Example
+-------
+
+        cpg_clocks: cpg_clocks@e6150000 {
+                compatible = "renesas,r8a73a4-cpg-clocks";
+                reg = <0 0xe6150000 0 0x10000>;
+                clocks = <&extal1_clk>, <&extal2_clk>;
+                #clock-cells = <1>;
+                clock-output-names = "main", "pll0", "pll1", "pll2",
+                                     "pll2s", "pll2h", "z", "z2",
+                                     "i", "m3", "b", "m1", "m2",
+                                     "zx", "zs", "hp";
+        };
index e6ad35b894f919f537dbf58cbdea6fffd9cddfc9..b02944fba9de4f8696d9f6ac7845acb96937aeed 100644 (file)
@@ -8,15 +8,18 @@ Required Properties:
   - compatible: Must be one of
     - "renesas,r8a7790-cpg-clocks" for the r8a7790 CPG
     - "renesas,r8a7791-cpg-clocks" for the r8a7791 CPG
+    - "renesas,r8a7793-cpg-clocks" for the r8a7793 CPG
     - "renesas,r8a7794-cpg-clocks" for the r8a7794 CPG
     - "renesas,rcar-gen2-cpg-clocks" for the generic R-Car Gen2 CPG
 
   - reg: Base address and length of the memory resource used by the CPG
 
-  - clocks: Reference to the parent clock
+  - clocks: References to the parent clocks: first to the EXTAL clock, second
+    to the USB_EXTAL clock
   - #clock-cells: Must be 1
   - clock-output-names: The names of the clocks. Supported clocks are "main",
-    "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1" and "z"
+    "pll0", "pll1", "pll3", "lb", "qspi", "sdh", "sd0", "sd1", "z", "rcan", and
+    "adsp"
 
 
 Example
@@ -26,8 +29,9 @@ Example
                compatible = "renesas,r8a7790-cpg-clocks",
                             "renesas,rcar-gen2-cpg-clocks";
                reg = <0 0xe6150000 0 0x1000>;
-               clocks = <&extal_clk>;
+               clocks = <&extal_clk &usb_extal_clk>;
                #clock-cells = <1>;
                clock-output-names = "main", "pll0, "pll1", "pll3",
-                                    "lb", "qspi", "sdh", "sd0", "sd1", "z";
+                                    "lb", "qspi", "sdh", "sd0", "sd1", "z",
+                                    "rcan", "adsp";
        };
index 67b2b99f2b339f0a35a8fddfd9127779542ca76b..60b44285250d3b3e9e580f9d71e51239df3b1c76 100644 (file)
@@ -26,7 +26,7 @@ Required properties:
        "allwinner,sun5i-a10s-ahb-gates-clk" - for the AHB gates on A10s
        "allwinner,sun7i-a20-ahb-gates-clk" - for the AHB gates on A20
        "allwinner,sun6i-a31-ar100-clk" - for the AR100 on A31
-       "allwinner,sun6i-a31-ahb1-mux-clk" - for the AHB1 multiplexer on A31
+       "allwinner,sun6i-a31-ahb1-clk" - for the AHB1 clock on A31
        "allwinner,sun6i-a31-ahb1-gates-clk" - for the AHB1 gates on A31
        "allwinner,sun8i-a23-ahb1-gates-clk" - for the AHB1 gates on A23
        "allwinner,sun9i-a80-ahb0-gates-clk" - for the AHB0 gates on A80
@@ -55,9 +55,11 @@ Required properties:
        "allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31
        "allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23
        "allwinner,sun5i-a13-mbus-clk" - for the MBUS clock on A13
-       "allwinner,sun4i-a10-mmc-output-clk" - for the MMC output clock on A10
-       "allwinner,sun4i-a10-mmc-sample-clk" - for the MMC sample clock on A10
+       "allwinner,sun4i-a10-mmc-clk" - for the MMC clock
+       "allwinner,sun9i-a80-mmc-clk" - for mmc module clocks on A80
+       "allwinner,sun9i-a80-mmc-config-clk" - for mmc gates + resets on A80
        "allwinner,sun4i-a10-mod0-clk" - for the module 0 family of clocks
+       "allwinner,sun9i-a80-mod0-clk" - for module 0 (storage) clocks on A80
        "allwinner,sun8i-a23-mbus-clk" - for the MBUS clock on A23
        "allwinner,sun7i-a20-out-clk" - for the external output clocks
        "allwinner,sun7i-a20-gmac-clk" - for the GMAC clock module on A20/A31
@@ -73,7 +75,9 @@ Required properties for all clocks:
 - #clock-cells : from common clock binding; shall be set to 0 except for
        the following compatibles where it shall be set to 1:
        "allwinner,*-gates-clk", "allwinner,sun4i-pll5-clk",
-       "allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk"
+       "allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk",
+       "allwinner,*-usb-clk", "allwinner,*-mmc-clk",
+       "allwinner,*-mmc-config-clk"
 - clock-output-names : shall be the corresponding names of the outputs.
        If the clock module only has one output, the name shall be the
        module name.
@@ -81,6 +85,10 @@ Required properties for all clocks:
 And "allwinner,*-usb-clk" clocks also require:
 - reset-cells : shall be set to 1
 
+The "allwinner,sun9i-a80-mmc-config-clk" clock also requires:
+- #reset-cells : shall be set to 1
+- resets : shall be the reset control phandle for the mmc block.
+
 For "allwinner,sun7i-a20-gmac-clk", the parent clocks shall be fixed rate
 dummy clocks at 25 MHz and 125 MHz, respectively. See example.
 
@@ -95,6 +103,14 @@ For "allwinner,sun6i-a31-pll6-clk", there are 2 outputs. The first output
 is the normal PLL6 output, or "pll6". The second output is rate doubled
 PLL6, or "pll6x2".
 
+The "allwinner,*-mmc-clk" clocks have three different outputs: the
+main clock, with the ID 0, and the output and sample clocks, with the
+IDs 1 and 2, respectively.
+
+The "allwinner,sun9i-a80-mmc-config-clk" clock has one clock/reset output
+per mmc controller. The number of outputs is determined by the size of
+the address block, which is related to the overall mmc block.
+
 For example:
 
 osc24M: clk@01c20050 {
@@ -138,11 +154,11 @@ cpu: cpu@01c20054 {
 };
 
 mmc0_clk: clk@01c20088 {
-       #clock-cells = <0>;
-       compatible = "allwinner,sun4i-mod0-clk";
+       #clock-cells = <1>;
+       compatible = "allwinner,sun4i-a10-mmc-clk";
        reg = <0x01c20088 0x4>;
        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-       clock-output-names = "mmc0";
+       clock-output-names = "mmc0", "mmc0_output", "mmc0_sample";
 };
 
 mii_phy_tx_clk: clk@2 {
@@ -170,3 +186,16 @@ gmac_clk: clk@01c20164 {
        clocks = <&mii_phy_tx_clk>, <&gmac_int_tx_clk>;
        clock-output-names = "gmac";
 };
+
+mmc_config_clk: clk@01c13000 {
+       compatible = "allwinner,sun9i-a80-mmc-config-clk";
+       reg = <0x01c13000 0x10>;
+       clocks = <&ahb0_gates 8>;
+       clock-names = "ahb";
+       resets = <&ahb0_resets 8>;
+       reset-names = "ahb";
+       #clock-cells = <1>;
+       #reset-cells = <1>;
+       clock-output-names = "mmc0_config", "mmc1_config",
+                            "mmc2_config", "mmc3_config";
+};
diff --git a/Documentation/devicetree/bindings/clock/ti,cdce706.txt b/Documentation/devicetree/bindings/clock/ti,cdce706.txt
new file mode 100644 (file)
index 0000000..616836e
--- /dev/null
@@ -0,0 +1,42 @@
+Bindings for Texas Instruments CDCE706 programmable 3-PLL clock
+synthesizer/multiplier/divider.
+
+Reference: http://www.ti.com/lit/ds/symlink/cdce706.pdf
+
+I2C device node required properties:
+- compatible: shall be "ti,cdce706".
+- reg: i2c device address, shall be in range [0x68...0x6b].
+- #clock-cells: from common clock binding; shall be set to 1.
+- clocks: from common clock binding; list of parent clock
+  handles, shall be reference clock(s) connected to CLK_IN0
+  and CLK_IN1 pins.
+- clock-names: shall be clk_in0 and/or clk_in1. Use clk_in0
+  in case of crystal oscillator or differential signal input
+  configuration. Use clk_in0 and clk_in1 in case of independent
+  single-ended LVCMOS inputs configuration.
+
+Example:
+
+       clocks {
+               clk54: clk54 {
+                       #clock-cells = <0>;
+                       compatible = "fixed-clock";
+                       clock-frequency = <54000000>;
+               };
+       };
+       ...
+       i2c0: i2c-master@0d090000 {
+               ...
+               cdce706: clock-synth@69 {
+                       compatible = "ti,cdce706";
+                       #clock-cells = <1>;
+                       reg = <0x69>;
+                       clocks = <&clk54>;
+                       clock-names = "clk_in0";
+               };
+       };
+       ...
+       simple-audio-card,codec {
+               ...
+               clocks = <&cdce706 4>;
+       };
diff --git a/Documentation/devicetree/bindings/clock/ti/fapll.txt b/Documentation/devicetree/bindings/clock/ti/fapll.txt
new file mode 100644 (file)
index 0000000..c19b3f2
--- /dev/null
@@ -0,0 +1,33 @@
+Binding for Texas Instruments FAPLL clock.
+
+Binding status: Unstable - ABI compatibility may be broken in the future
+
+This binding uses the common clock binding[1]. It assumes a
+register-mapped FAPLL with usually two selectable input clocks
+(reference clock and bypass clock), and one or more child
+synthesizers.
+
+[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Required properties:
+- compatible : shall be "ti,dm816-fapll-clock"
+- #clock-cells : from common clock binding; shall be set to 0.
+- clocks : link phandles of parent clocks (clk-ref and clk-bypass)
+- reg : address and length of the register set for controlling the FAPLL.
+
+Examples:
+       main_fapll: main_fapll {
+               #clock-cells = <1>;
+               compatible = "ti,dm816-fapll-clock";
+               reg = <0x400 0x40>;
+               clocks = <&sys_clkin_ck &sys_clkin_ck>;
+               clock-indices = <1>, <2>, <3>, <4>, <5>,
+                               <6>, <7>;
+               clock-output-names = "main_pll_clk1",
+                                    "main_pll_clk2",
+                                    "main_pll_clk3",
+                                    "main_pll_clk4",
+                                    "main_pll_clk5",
+                                    "main_pll_clk6",
+                                    "main_pll_clk7";
+       };
diff --git a/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt b/Documentation/devicetree/bindings/i2c/brcm,iproc-i2c.txt
new file mode 100644 (file)
index 0000000..81f982c
--- /dev/null
@@ -0,0 +1,37 @@
+Broadcom iProc I2C controller
+
+Required properties:
+
+- compatible:
+    Must be "brcm,iproc-i2c"
+
+- reg:
+    Define the base and range of the I/O address space that contain the iProc
+    I2C controller registers
+
+- interrupts:
+    Should contain the I2C interrupt
+
+- clock-frequency:
+    This is the I2C bus clock frequency. Needs to be either 100000 or 400000
+
+- #address-cells:
+    Always 1 (for I2C addresses)
+
+- #size-cells:
+    Always 0
+
+Example:
+       i2c0: i2c@18008000 {
+               compatible = "brcm,iproc-i2c";
+               reg = <0x18008000 0x100>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+               clock-frequency = <100000>;
+
+               codec: wm8750@1a {
+                       compatible = "wlf,wm8750";
+                       reg = <0x1a>;
+               };
+       };
index 34a3fb6f8488b26a9c5def9866898d807ee7df55..cf53d5fba20a0934c631b8320e538cbc5440e155 100644 (file)
@@ -16,6 +16,9 @@ Required Properties:
 Optional Properties:
 
   - reset-gpios: Reference to the GPIO connected to the reset input.
+  - i2c-mux-idle-disconnect: Boolean; if defined, forces mux to disconnect all
+    children in idle state. This is necessary, for example, if there are
+    several multiplexers on the bus and the devices behind them use the same
+    I2C addresses.
 
 
 Example:
index 1637c298a1b337bf83612cb12129001ecd11da24..17bef9a34e507541ea3f201039b04e199e551fa3 100644 (file)
@@ -4,24 +4,60 @@ Required properties:
 - compatible      : "opencores,i2c-ocores" or "aeroflexgaisler,i2cmst"
 - reg             : bus address start and address range size of device
 - interrupts      : interrupt number
-- clock-frequency : frequency of bus clock in Hz
+- clocks          : handle to the controller clock; see the note below.
+                    Mutually exclusive with opencores,ip-clock-frequency
+- opencores,ip-clock-frequency: frequency of the controller clock in Hz;
+                    see the note below. Mutually exclusive with clocks
 - #address-cells  : should be <1>
 - #size-cells     : should be <0>
 
 Optional properties:
+- clock-frequency : frequency of bus clock in Hz; see the note below.
+                    Defaults to 100 KHz when the property is not specified
 - reg-shift       : device register offsets are shifted by this value
 - reg-io-width    : io register width in bytes (1, 2 or 4)
 - regstep         : deprecated, use reg-shift above
 
-Example:
+Note
+clock-frequency property is meant to control the bus frequency for i2c bus
+drivers, but it was incorrectly used to specify i2c controller input clock
+frequency. So the following rules are set to fix this situation:
+- if clock-frequency is present and neither opencores,ip-clock-frequency nor
+  clocks are, then clock-frequency specifies i2c controller clock frequency.
+  This is to keep backwards compatibility with setups using old DTB. i2c bus
+  frequency is fixed at 100 KHz.
+- if clocks is present it specifies i2c controller clock. clock-frequency
+  property specifies i2c bus frequency.
+- if opencores,ip-clock-frequency is present it specifies i2c controller
+  clock frequency. clock-frequency property specifies i2c bus frequency.
 
+Examples:
+
+       i2c0: ocores@a0000000 {
+               #address-cells = <1>;
+               #size-cells = <0>;
+               compatible = "opencores,i2c-ocores";
+               reg = <0xa0000000 0x8>;
+               interrupts = <10>;
+               opencores,ip-clock-frequency = <20000000>;
+
+               reg-shift = <0>;        /* 8 bit registers */
+               reg-io-width = <1>;     /* 8 bit read/write */
+
+               dummy@60 {
+                       compatible = "dummy";
+                       reg = <0x60>;
+               };
+       };
+or
        i2c0: ocores@a0000000 {
                #address-cells = <1>;
                #size-cells = <0>;
                compatible = "opencores,i2c-ocores";
                reg = <0xa0000000 0x8>;
                interrupts = <10>;
-               clock-frequency = <20000000>;
+               clocks = <&osc>;
+               clock-frequency = <400000>; /* i2c bus frequency 400 KHz */
 
                reg-shift = <0>;        /* 8 bit registers */
                reg-io-width = <1>;     /* 8 bit read/write */
index dde6c22ce91a13df20eaab99ebd6dbf684983768..f0d71bc52e64be39cea42a7cc04b1e05662ad641 100644 (file)
@@ -21,6 +21,17 @@ Required on RK3066, RK3188 :
 Optional properties :
 
  - clock-frequency : SCL frequency to use (in Hz). If omitted, 100kHz is used.
+ - i2c-scl-rising-time-ns : Number of nanoseconds the SCL signal takes to rise
+       (t(r) in the I2C specification). If not specified this is assumed to
+       be the maximum the specification allows (1000 ns for Standard-mode,
+       300 ns for Fast-mode) which might cause slightly slower communication.
+ - i2c-scl-falling-time-ns : Number of nanoseconds the SCL signal takes to fall
+       (t(f) in the I2C specification). If not specified this is assumed to
+       be the maximum the specification allows (300 ns) which might cause
+       slightly slower communication.
+ - i2c-sda-falling-time-ns : Number of nanoseconds the SDA signal takes to fall
+       (t(f) in the I2C specification). If not specified we'll use the SCL
+       value since they are the same in nearly all cases.
 
 Example:
 
@@ -39,4 +50,7 @@ i2c0: i2c@2002d000 {
 
        clock-names = "i2c";
        clocks = <&cru PCLK_I2C0>;
+
+       i2c-scl-rising-time-ns = <800>;
+       i2c-scl-falling-time-ns = <100>;
 };
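
If the SDA fall time on a given board differs from SCL, the node can carry the
SDA property as well; a hedged sketch (the 120 ns value is purely
illustrative):

       &i2c0 {
               i2c-scl-rising-time-ns = <800>;
               i2c-scl-falling-time-ns = <100>;
               i2c-sda-falling-time-ns = <120>;
       };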
index 4dcd88d5f7ca453503120236a2ee3670ac7a55e8..aaa8325004d23ae6313223594817f392c1a31359 100644 (file)
@@ -61,9 +61,8 @@ fsl,sgtl5000          SGTL5000: Ultra Low-Power Audio Codec
 gmt,g751               G751: Digital Temperature Sensor and Thermal Watchdog with Two-Wire Interface
 infineon,slb9635tt     Infineon SLB9635 (Soft-) I2C TPM (old protocol, max 100khz)
 infineon,slb9645tt     Infineon SLB9645 I2C TPM (new protocol, max 400khz)
-isl,isl12057           Intersil ISL12057 I2C RTC Chip
-isil,isl29028           (deprecated, use isl)
-isl,isl29028            Intersil ISL29028 Ambient Light and Proximity Sensor
+isil,isl12057          Intersil ISL12057 I2C RTC Chip
+isil,isl29028          Intersil ISL29028 Ambient Light and Proximity Sensor
 maxim,ds1050           5 Bit Programmable, Pulse-Width Modulator
 maxim,max1237          Low-Power, 4-/12-Channel, 2-Wire Serial, 12-Bit ADCs
 maxim,max6625          9-Bit/12-Bit Temperature Sensors with I²C-Compatible Serial Interface
diff --git a/Documentation/devicetree/bindings/mips/cavium/cib.txt b/Documentation/devicetree/bindings/mips/cavium/cib.txt
new file mode 100644 (file)
index 0000000..f39a1aa
--- /dev/null
@@ -0,0 +1,43 @@
+* Cavium Interrupt Bus widget
+
+Properties:
+- compatible: "cavium,octeon-7130-cib"
+
+  Compatibility with cn70XX SoCs.
+
+- interrupt-controller:  This is an interrupt controller.
+
+- reg: Two elements consisting of the addresses of the RAW and EN
+  registers of the CIB block
+
+- cavium,max-bits: The index (zero based) of the highest numbered bit
+  in the CIB block.
+
+- interrupt-parent:  Always the CIU on the SoC.
+
+- interrupts: The CIU line to which the CIB block is connected.
+
+- #interrupt-cells: Must be <2>.  The first cell is the bit within the
+   CIB.  The second cell specifies the triggering semantics of the
+   line.
+
+Example:
+
+       interrupt-controller@107000000e000 {
+               compatible = "cavium,octeon-7130-cib";
+               reg = <0x10700 0x0000e000 0x0 0x8>, /* RAW */
+                     <0x10700 0x0000e100 0x0 0x8>; /* EN */
+               cavium,max-bits = <23>;
+
+               interrupt-controller;
+               interrupt-parent = <&ciu>;
+               interrupts = <1 24>;
+               /* Interrupts are specified by two parts:
+                * 1) Bit number in the CIB* registers
+                * 2) Triggering (1 - edge rising
+                *                2 - edge falling
+                *                4 - level active high
+                *                8 - level active low)
+                */
+               #interrupt-cells = <2>;
+       };
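
A hedged sketch of a consumer device routing its interrupt through this
controller, assuming the node above is labelled "cib" (the consumer node, bit
number and trigger type are illustrative):

       gpio-controller@1070000000800 {
               ...
               interrupt-parent = <&cib>;
               interrupts = <4 4>;     /* CIB bit 4, level active high */
       };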
index 91b3a34671508bdf11a1f368dde9588fc34961df..4bf41d8338046ef5aef2e47e4ee1dccf727acfe2 100644 (file)
@@ -10,8 +10,8 @@ Absolute maximum transfer rate is 200MB/s
 Required properties:
  - compatible : "allwinner,sun4i-a10-mmc" or "allwinner,sun5i-a13-mmc"
  - reg : mmc controller base registers
- - clocks : a list with 2 phandle + clock specifier pairs
- - clock-names : must contain "ahb" and "mmc"
+ - clocks : a list with 4 phandle + clock specifier pairs
+ - clock-names : must contain "ahb", "mmc", "output" and "sample"
  - interrupts : mmc controller interrupt
 
 Optional properties:
@@ -25,8 +25,8 @@ Examples:
        mmc0: mmc@01c0f000 {
                compatible = "allwinner,sun5i-a13-mmc";
                reg = <0x01c0f000 0x1000>;
-               clocks = <&ahb_gates 8>, <&mmc0_clk>;
-               clock-names = "ahb", "mod";
+               clocks = <&ahb_gates 8>, <&mmc0_clk>, <&mmc0_output_clk>, <&mmc0_sample_clk>;
+               clock-names = "ahb", "mmc", "output", "sample";
                interrupts = <0 32 4>;
                status = "disabled";
        };
index ae738f562acca6bf915ef5691dfd32da9d2e23c8..695150a4136bb3c3291e94fcbf7f7cdddfe6045e 100644 (file)
@@ -12,6 +12,7 @@
               "samsung,exynos5420-tmu-ext-triminfo" for TMU channels 2, 3 and 4
                        Exynos5420 (Must pass triminfo base and triminfo clock)
               "samsung,exynos5440-tmu"
+              "samsung,exynos7-tmu"
 - interrupt-parent : The phandle for the interrupt controller
 - reg : Address range of the thermal registers. For soc's which has multiple
        instances of TMU and some registers are shared across all TMU's like
 - clocks : The main clocks for TMU device
        -- 1. operational clock for TMU channel
        -- 2. optional clock to access the shared registers of TMU channel
+       -- 3. optional special clock for functional operation
 - clock-names : Thermal system clock name
        -- "tmu_apbif" operational clock for current TMU channel
        -- "tmu_triminfo_apbif" clock to access the shared triminfo register
                for current TMU channel
+       -- "tmu_sclk" clock for functional operation of the current TMU
+               channel
 - vtmu-supply: This entry is optional and provides the regulator node supplying
                voltage to TMU. If needed this entry can be placed inside
                board/platform specific dts file.
+The following properties are mandatory (depending on the SoC):
+- samsung,tmu_gain: Gain value for internal TMU operation.
+- samsung,tmu_reference_voltage: Value of TMU IP block's reference voltage
+- samsung,tmu_noise_cancel_mode: Mode for noise cancellation
+- samsung,tmu_efuse_value: Default temperature level - it is needed when
+                          factory fusing produced a wrong value
+- samsung,tmu_min_efuse_value: Minimum temperature fused value
+- samsung,tmu_max_efuse_value: Maximum temperature fused value
+- samsung,tmu_first_point_trim: First point trimming value
+- samsung,tmu_second_point_trim: Second point trimming value
+- samsung,tmu_default_temp_offset: Default temperature offset
+- samsung,tmu_cal_type: Calibration type
 
 Example 1):
 
@@ -51,6 +67,7 @@ Example 1):
                clock-names = "tmu_apbif";
                status = "disabled";
                vtmu-supply = <&tmu_regulator_node>;
+               #include "exynos4412-tmu-sensor-conf.dtsi"
        };
 
 Example 2):
@@ -61,6 +78,7 @@ Example 2):
                interrupts = <0 58 0>;
                clocks = <&clock 21>;
                clock-names = "tmu_apbif";
+               #include "exynos5440-tmu-sensor-conf.dtsi"
        };
 
 Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
@@ -70,6 +88,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
                interrupts = <0 184 0>;
                clocks = <&clock 318>, <&clock 318>;
                clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+               #include "exynos4412-tmu-sensor-conf.dtsi"
        };
 
        tmu_cpu3: tmu@1006c000 {
@@ -78,6 +97,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
                interrupts = <0 185 0>;
                clocks = <&clock 318>, <&clock 319>;
                clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+               #include "exynos4412-tmu-sensor-conf.dtsi"
        };
 
        tmu_gpu: tmu@100a0000 {
@@ -86,6 +106,7 @@ Example 3): (In case of Exynos5420 "with misplaced TRIMINFO register")
                interrupts = <0 215 0>;
                clocks = <&clock 319>, <&clock 318>;
                clock-names = "tmu_apbif", "tmu_triminfo_apbif";
+               #include "exynos4412-tmu-sensor-conf.dtsi"
        };
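
A sketch of a possible node that also uses the optional "tmu_sclk" clock
alongside the operational clock (the register address, interrupt, clock
controller label, clock specifiers and sensor-conf include are illustrative
assumptions, not taken from a real board):

       tmu_cpu0: tmu@10060000 {
               compatible = "samsung,exynos7-tmu";
               reg = <0x10060000 0x200>;
               interrupts = <0 59 0>;
               clocks = <&clock_peric0 PCLK_TMU>,
                        <&clock_peric0 SCLK_TMU>;
               clock-names = "tmu_apbif", "tmu_sclk";
               #include "exynos7-tmu-sensor-conf.dtsi"
       };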
 
 Note: For multi-instance tmu each instance should have an alias correctly
index f5db6b72a36fdb78a2f3e72413e1241ae4867a66..29fe0bfae38e454975dddf145d20d6acbde8bef6 100644 (file)
@@ -251,24 +251,24 @@ ocp {
 };
 
 thermal-zones {
-       cpu-thermal: cpu-thermal {
+       cpu_thermal: cpu-thermal {
                polling-delay-passive = <250>; /* milliseconds */
                polling-delay = <1000>; /* milliseconds */
 
                thermal-sensors = <&bandgap0>;
 
                trips {
-                       cpu-alert0: cpu-alert {
+                       cpu_alert0: cpu-alert0 {
                                temperature = <90000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "active";
                        };
-                       cpu-alert1: cpu-alert {
+                       cpu_alert1: cpu-alert1 {
                                temperature = <100000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "passive";
                        };
-                       cpu-crit: cpu-crit {
+                       cpu_crit: cpu-crit {
                                temperature = <125000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "critical";
@@ -277,17 +277,17 @@ thermal-zones {
 
                cooling-maps {
                        map0 {
-                               trip = <&cpu-alert0>;
-                               cooling-device = <&fan0 THERMAL_NO_LIMITS 4>;
+                               trip = <&cpu_alert0>;
+                               cooling-device = <&fan0 THERMAL_NO_LIMIT 4>;
                        };
                        map1 {
-                               trip = <&cpu-alert1>;
-                               cooling-device = <&fan0 5 THERMAL_NO_LIMITS>;
+                               trip = <&cpu_alert1>;
+                               cooling-device = <&fan0 5 THERMAL_NO_LIMIT>;
                        };
                        map2 {
-                               trip = <&cpu-alert1>;
+                               trip = <&cpu_alert1>;
                                cooling-device =
-                                   <&cpu0 THERMAL_NO_LIMITS THERMAL_NO_LIMITS>;
+                                   <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
                        };
                };
        };
@@ -298,13 +298,13 @@ used to monitor the zone 'cpu-thermal' using its sole sensor. A fan
 device (fan0) is controlled via I2C bus 1, at address 0x48, and has ten
 different cooling states 0-9. It is used to remove the heat out of
 the thermal zone 'cpu-thermal' using its cooling states
-from its minimum to 4, when it reaches trip point 'cpu-alert0'
+from its minimum to 4, when it reaches trip point 'cpu_alert0'
 at 90C, as an example of active cooling. The same cooling device is used at
-'cpu-alert1', but from 5 to its maximum state. The cpu@0 device is also
+'cpu_alert1', but from 5 to its maximum state. The cpu@0 device is also
 linked to the same thermal zone, 'cpu-thermal', as a passive cooling device,
-using all its cooling states at trip point 'cpu-alert1',
+using all its cooling states at trip point 'cpu_alert1',
 which is a trip point at 100C. On the thermal zone 'cpu-thermal', at the
-temperature of 125C, represented by the trip point 'cpu-crit', the silicon
+temperature of 125C, represented by the trip point 'cpu_crit', the silicon
 is not reliable anymore.
 
 (b) - IC with several internal sensors
@@ -329,7 +329,7 @@ ocp {
 };
 
 thermal-zones {
-       cpu-thermal: cpu-thermal {
+       cpu_thermal: cpu-thermal {
                polling-delay-passive = <250>; /* milliseconds */
                polling-delay = <1000>; /* milliseconds */
 
@@ -338,12 +338,12 @@ thermal-zones {
 
                trips {
                        /* each zone within the SoC may have its own trips */
-                       cpu-alert: cpu-alert {
+                       cpu_alert: cpu-alert {
                                temperature = <100000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "passive";
                        };
-                       cpu-crit: cpu-crit {
+                       cpu_crit: cpu-crit {
                                temperature = <125000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "critical";
@@ -356,7 +356,7 @@ thermal-zones {
                };
        };
 
-       gpu-thermal: gpu-thermal {
+       gpu_thermal: gpu-thermal {
                polling-delay-passive = <120>; /* milliseconds */
                polling-delay = <1000>; /* milliseconds */
 
@@ -365,12 +365,12 @@ thermal-zones {
 
                trips {
                        /* each zone within the SoC may have its own trips */
-                       gpu-alert: gpu-alert {
+                       gpu_alert: gpu-alert {
                                temperature = <90000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "passive";
                        };
-                       gpu-crit: gpu-crit {
+                       gpu_crit: gpu-crit {
                                temperature = <105000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "critical";
@@ -383,7 +383,7 @@ thermal-zones {
                };
        };
 
-       dsp-thermal: dsp-thermal {
+       dsp_thermal: dsp-thermal {
                polling-delay-passive = <50>; /* milliseconds */
                polling-delay = <1000>; /* milliseconds */
 
@@ -392,12 +392,12 @@ thermal-zones {
 
                trips {
                        /* each zone within the SoC may have its own trips */
-                       dsp-alert: gpu-alert {
+                       dsp_alert: dsp-alert {
                                temperature = <90000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "passive";
                        };
-                       dsp-crit: gpu-crit {
+                       dsp_crit: dsp-crit {
                                temperature = <135000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "critical";
@@ -457,7 +457,7 @@ ocp {
 };
 
 thermal-zones {
-       cpu-thermal: cpu-thermal {
+       cpu_thermal: cpu-thermal {
                polling-delay-passive = <250>; /* milliseconds */
                polling-delay = <1000>; /* milliseconds */
 
@@ -508,7 +508,7 @@ with many sensors and many cooling devices.
        /*
         * An IC with several temperature sensor.
         */
-       adc-dummy: sensor@0x50 {
+       adc_dummy: sensor@0x50 {
                ...
                #thermal-sensor-cells = <1>; /* sensor internal ID */
        };
@@ -520,7 +520,7 @@ thermal-zones {
                polling-delay = <2500>; /* milliseconds */
 
                                /* sensor       ID */
-               thermal-sensors = <&adc-dummy     4>;
+               thermal-sensors = <&adc_dummy     4>;
 
                trips {
                        ...
@@ -531,14 +531,14 @@ thermal-zones {
                };
        };
 
-       board-thermal: board-thermal {
+       board_thermal: board-thermal {
                polling-delay-passive = <1000>; /* milliseconds */
                polling-delay = <2500>; /* milliseconds */
 
                                /* sensor       ID */
-               thermal-sensors = <&adc-dummy     0>, /* pcb top edge */
-                                 <&adc-dummy     1>, /* lcd */
-                                 <&adc-dymmy     2>; /* back cover */
+               thermal-sensors = <&adc_dummy     0>, /* pcb top edge */
+                                 <&adc_dummy     1>, /* lcd */
+                                 <&adc_dummy     2>; /* back cover */
                /*
                 * An array of coefficients describing the sensor
                 * linear relation. E.g.:
@@ -548,22 +548,22 @@ thermal-zones {
 
                trips {
                        /* Trips are based on resulting linear equation */
-                       cpu-trip: cpu-trip {
+                       cpu_trip: cpu-trip {
                                temperature = <60000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "passive";
                        };
-                       gpu-trip: gpu-trip {
+                       gpu_trip: gpu-trip {
                                temperature = <55000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "passive";
                        };
-                       lcd-trip: lcp-trip {
+                       lcd_trip: lcd-trip {
                                temperature = <53000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "passive";
                        };
-                       crit-trip: crit-trip {
+                       crit_trip: crit-trip {
                                temperature = <68000>; /* millicelsius */
                                hysteresis = <2000>; /* millicelsius */
                                type = "critical";
@@ -572,17 +572,17 @@ thermal-zones {
 
                cooling-maps {
                        map0 {
-                               trip = <&cpu-trip>;
+                               trip = <&cpu_trip>;
                                cooling-device = <&cpu0 0 2>;
                                contribution = <55>;
                        };
                        map1 {
-                               trip = <&gpu-trip>;
+                               trip = <&gpu_trip>;
                                cooling-device = <&gpu0 0 2>;
                                contribution = <20>;
                        };
                        map2 {
-                               trip = <&lcd-trip>;
+                               trip = <&lcd_trip>;
                                cooling-device = <&lcd0 5 10>;
                                contribution = <15>;
                        };
index 2ca3d17eee56380cec075d5279c2703fa2b1ed9e..f91926f2f4824dee2c6785dcaf4cedd84df838e9 100644 (file)
@@ -164,8 +164,6 @@ the block device inode.  See there for more details.
 
 --------------------------- file_system_type ---------------------------
 prototypes:
-       int (*get_sb) (struct file_system_type *, int,
-                      const char *, void *, struct vfsmount *);
        struct dentry *(*mount) (struct file_system_type *, int,
                       const char *, void *);
        void (*kill_sb) (struct super_block *);
index a27c950ece61b0d312fbba2a3757fab71c5b3616..6db0e5d1da07ee52d13f5997b84b3b10ef43e2dd 100644 (file)
@@ -159,6 +159,22 @@ overlay filesystem (though an operation on the name of the file such as
 rename or unlink will of course be noticed and handled).
 
 
+Multiple lower layers
+---------------------
+
+Multiple lower layers can now be given using the colon (":") as a
+separator character between the directory names.  For example:
+
+  mount -t overlay overlay -olowerdir=/lower1:/lower2:/lower3 /merged
+
+As the example shows, "upperdir=" and "workdir=" may be omitted.  In
+that case the overlay will be read-only.
+
+The specified lower directories will be stacked beginning from the
+rightmost one and going left.  In the above example lower1 will be the
+top, lower2 the middle and lower3 the bottom layer.
+
+
 Non-standard behavior
 ---------------------
 
@@ -196,3 +212,15 @@ Changes to the underlying filesystems while part of a mounted overlay
 filesystem are not allowed.  If the underlying filesystem is changed,
 the behavior of the overlay is undefined, though it will not result in
 a crash or deadlock.
+
+Testsuite
+---------
+
+There's a testsuite developed by David Howells at:
+
+  git://git.infradead.org/users/dhowells/unionmount-testsuite.git
+
+Run as root:
+
+  # cd unionmount-testsuite
+  # ./run --ov
index 4556a3eb87c454f3db1c9efbd9c057f454c135d6..4aae8ed15873d1d3c1cc8fc9d80d3ef9b42e5654 100644 (file)
@@ -12,7 +12,7 @@ FUNCTIONALITY CONSTANTS
 -----------------------
 
 For the most up-to-date list of functionality constants, please check
-<linux/i2c.h>!
+<uapi/linux/i2c.h>!
 
   I2C_FUNC_I2C                    Plain i2c-level commands (Pure SMBus
                                   adapters typically can not do these)
index 90bca6f988e115e046583c13b9d157ebc38bd280..a63e5e013a8cddee63b1d3520dd1c2c73e80dc31 100644 (file)
@@ -3,8 +3,8 @@ ALPS Touchpad Protocol
 
 Introduction
 ------------
-Currently the ALPS touchpad driver supports five protocol versions in use by
-ALPS touchpads, called versions 1, 2, 3, 4 and 5.
+Currently the ALPS touchpad driver supports seven protocol versions in use by
+ALPS touchpads, called versions 1, 2, 3, 4, 5, 6 and 7.
 
 Since roughly mid-2010 several new ALPS touchpads have been released and
 integrated into a variety of laptops and netbooks.  These new touchpads
@@ -240,3 +240,67 @@ For mt, the format is:
  byte 3:    0  x23  x22   x21 x20  x19  x18   x17
  byte 4:    0   x9   x8    x7  x6   x5   x4    x3
  byte 5:    0  x16  x15   x14 x13  x12  x11   x10
+
+ALPS Absolute Mode - Protocol Version 6
+---------------------------------------
+
+For trackstick packet, the format is:
+
+ byte 0:    1    1    1    1    1    1    1    1
+ byte 1:    0   X6   X5   X4   X3   X2   X1   X0
+ byte 2:    0   Y6   Y5   Y4   Y3   Y2   Y1   Y0
+ byte 3:    ?   Y7   X7    ?    ?    M    R    L
+ byte 4:   Z7   Z6   Z5   Z4   Z3   Z2   Z1   Z0
+ byte 5:    0    1    1    1    1    1    1    1
+
+For touchpad packet, the format is:
+
+ byte 0:    1    1    1    1    1    1    1    1
+ byte 1:    0    0    0    0   x3   x2   x1   x0
+ byte 2:    0    0    0    0   y3   y2   y1   y0
+ byte 3:    ?   x7   x6   x5   x4    ?    r    l
+ byte 4:    ?   y7   y6   y5   y4    ?    ?    ?
+ byte 5:   z7   z6   z5   z4   z3   z2   z1   z0
+
+(v6 touchpad does not have middle button)
+
+ALPS Absolute Mode - Protocol Version 7
+---------------------------------------
+
+For trackstick packet, the format is:
+
+ byte 0:    0    1    0    0    1    0    0    0
+ byte 1:    1    1    *    *    1    M    R    L
+ byte 2:   X7    1   X5   X4   X3   X2   X1   X0
+ byte 3:   Z6    1   Y6   X6    1   Y2   Y1   Y0
+ byte 4:   Y7    0   Y5   Y4   Y3    1    1    0
+ byte 5:  T&P    0   Z5   Z4   Z3   Z2   Z1   Z0
+
+For touchpad packet, the format is:
+
+         packet-fmt     b7     b6     b5     b4     b3     b2     b1     b0
+ byte 0: TWO & MULTI     L      1      R      M      1   Y0-2   Y0-1   Y0-0
+ byte 0: NEW             L      1   X1-5      1      1   Y0-2   Y0-1   Y0-0
+ byte 1:             Y0-10   Y0-9   Y0-8   Y0-7   Y0-6   Y0-5   Y0-4   Y0-3
+ byte 2:             X0-11      1  X0-10   X0-9   X0-8   X0-7   X0-6   X0-5
+ byte 3:             X1-11      1   X0-4   X0-3      1   X0-2   X0-1   X0-0
+ byte 4: TWO         X1-10    TWO   X1-9   X1-8   X1-7   X1-6   X1-5   X1-4
+ byte 4: MULTI       X1-10    TWO   X1-9   X1-8   X1-7   X1-6   Y1-5      1
+ byte 4: NEW         X1-10    TWO   X1-9   X1-8   X1-7   X1-6      0      0
+ byte 5: TWO & NEW   Y1-10      0   Y1-9   Y1-8   Y1-7   Y1-6   Y1-5   Y1-4
+ byte 5: MULTI       Y1-10      0   Y1-9   Y1-8   Y1-7   Y1-6    F-1    F-0
+
+ L:         Left button
+ R / M:     Non-clickpads: Right / Middle button
+            Clickpads: When > 2 fingers are down, and some fingers
+            are in the button area, then the 2 coordinates reported
+            are for fingers outside the button area and these report
+            extra fingers being present in the right / left button
+            area. Note these fingers are not added to the F field!
+            area. Note these fingers are not added to the F field,
+            3 fingers down, etc.
+ TWO:       1: Two touches present, byte 0/4/5 are in TWO fmt
+            0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
+               otherwise byte 0 bit 4 must be set and byte 0/4/5 are
+               in NEW fmt
+ F:         Number of fingers minus 3; 0 means 3 fingers, 1 means 4, ...
index a311db829e9bb6a819bc293404d65d5c3584a741..74b6c6d97210902d63fc6022e02968c5dc317c02 100644 (file)
@@ -524,15 +524,16 @@ more details, with real examples.
        Example:
                #arch/x86/Makefile
                cflags-y += $(shell \
-               if [ $(call cc-version) -ge 0300 ] ; then \
+               if [ $(cc-version) -ge 0300 ] ; then \
                        echo "-mregparm=3"; fi ;)
 
        In the above example, -mregparm=3 is only used for gcc version greater
        than or equal to gcc 3.0.
 
     cc-ifversion
-       cc-ifversion tests the version of $(CC) and equals last argument if
-       version expression is true.
+       cc-ifversion tests the version of $(CC) and evaluates to the fourth
+       parameter if the version expression is true, or to the fifth (if given)
+       if the version expression is false.
 
        Example:
                #fs/reiserfs/Makefile
@@ -552,7 +553,7 @@ more details, with real examples.
 
        Example:
                #arch/powerpc/Makefile
-               $(Q)if test "$(call cc-fullversion)" = "040200" ; then \
+               $(Q)if test "$(cc-fullversion)" = "040200" ; then \
                        echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
                        false ; \
                fi
@@ -751,12 +752,12 @@ generated by kbuild are deleted all over the kernel src tree when
 Additional files can be specified in kbuild makefiles by use of $(clean-files).
 
        Example:
-               #drivers/pci/Makefile
-               clean-files := devlist.h classlist.h
+               #lib/Makefile
+               clean-files := crc32table.h
 
 When executing "make clean", the file "crc32table.h" will be
 deleted. Kbuild will assume files to be in the same relative directory as the
-Makefile except if an absolute path is specified (path starting with '/').
+Makefile, except if prefixed with $(objtree).
 
 To delete a directory hierarchy use:
 
@@ -764,9 +765,8 @@ To delete a directory hierarchy use:
                #scripts/package/Makefile
                clean-dirs := $(objtree)/debian/
 
-This will delete the directory debian, including all subdirectories.
-Kbuild will assume the directories to be in the same relative path as the
-Makefile if no absolute path is specified (path does not start with '/').
+This will delete the directory debian in the toplevel directory, including all
+subdirectories.
 
 To exclude certain files from make clean, use the $(no-clean-files) variable.
 This is only a special case used in the top level Kbuild file:
index 199f453cb4de10016030c2dd230fb9a3a3125cee..82fbdbc1e0b0626611462132d38fc29c5ee27078 100644 (file)
@@ -3,7 +3,7 @@ protocol of kernel. These should be filled by bootloader or 16-bit
 real-mode setup code of the kernel. References/settings to it mainly
 are in:
 
-  arch/x86/include/asm/bootparam.h
+  arch/x86/include/uapi/asm/bootparam.h
 
 
 Offset Proto   Name            Meaning
diff --git a/Kbuild b/Kbuild
index b8b708ad6dc3815eb0d23bfea2c972d03b9477c0..ab8ded92e870f55290d4d8283e4cbb1b849f8a58 100644 (file)
--- a/Kbuild
+++ b/Kbuild
@@ -5,24 +5,23 @@
 # 2) Generate asm-offsets.h (may need bounds.h)
 # 3) Check for missing system calls
 
-#####
-# 1) Generate bounds.h
-
-bounds-file := include/generated/bounds.h
-
-always  := $(bounds-file)
-targets := $(bounds-file) kernel/bounds.s
+# Default sed regexp - multiline due to syntax constraints
+define sed-y
+       "/^->/{s:->#\(.*\):/* \1 */:; \
+       s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
+       s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
+       s:->::; p;}"
+endef
 
-quiet_cmd_bounds = GEN     $@
-define cmd_bounds
+quiet_cmd_offsets = GEN     $@
+define cmd_offsets
        (set -e; \
-        echo "#ifndef __LINUX_BOUNDS_H__"; \
-        echo "#define __LINUX_BOUNDS_H__"; \
+        echo "#ifndef $2"; \
+        echo "#define $2"; \
         echo "/*"; \
         echo " * DO NOT MODIFY."; \
         echo " *"; \
         echo " * This file was generated by Kbuild"; \
-        echo " *"; \
         echo " */"; \
         echo ""; \
         sed -ne $(sed-y) $<; \
@@ -30,6 +29,14 @@ define cmd_bounds
         echo "#endif" ) > $@
 endef
 
+#####
+# 1) Generate bounds.h
+
+bounds-file := include/generated/bounds.h
+
+always  := $(bounds-file)
+targets := $(bounds-file) kernel/bounds.s
+
 # We use internal kbuild rules to avoid the "is up to date" message from make
 kernel/bounds.s: kernel/bounds.c FORCE
        $(Q)mkdir -p $(dir $@)
@@ -37,7 +44,7 @@ kernel/bounds.s: kernel/bounds.c FORCE
 
 $(obj)/$(bounds-file): kernel/bounds.s Kbuild
        $(Q)mkdir -p $(dir $@)
-       $(call cmd,bounds)
+       $(call cmd,offsets,__LINUX_BOUNDS_H__)
 
 #####
 # 2) Generate asm-offsets.h
@@ -49,32 +56,6 @@ always  += $(offsets-file)
 targets += $(offsets-file)
 targets += arch/$(SRCARCH)/kernel/asm-offsets.s
 
-
-# Default sed regexp - multiline due to syntax constraints
-define sed-y
-       "/^->/{s:->#\(.*\):/* \1 */:; \
-       s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
-       s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
-       s:->::; p;}"
-endef
-
-quiet_cmd_offsets = GEN     $@
-define cmd_offsets
-       (set -e; \
-        echo "#ifndef __ASM_OFFSETS_H__"; \
-        echo "#define __ASM_OFFSETS_H__"; \
-        echo "/*"; \
-        echo " * DO NOT MODIFY."; \
-        echo " *"; \
-        echo " * This file was generated by Kbuild"; \
-        echo " *"; \
-        echo " */"; \
-        echo ""; \
-        sed -ne $(sed-y) $<; \
-        echo ""; \
-        echo "#endif" ) > $@
-endef
-
 # We use internal kbuild rules to avoid the "is up to date" message from make
 arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \
                                       $(obj)/$(bounds-file) FORCE
@@ -82,7 +63,7 @@ arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \
        $(call if_changed_dep,cc_s_c)
 
 $(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s Kbuild
-       $(call cmd,offsets)
+       $(call cmd,offsets,__ASM_OFFSETS_H__)
 
 #####
 # 3) Check for missing system calls
index 1921ed58d1a093a1270a67893cca0cc299182cbe..ddc5a8cf9a8ac0078f8ca1bc99d9c48f8197214a 100644 (file)
@@ -2433,7 +2433,8 @@ F:        arch/powerpc/oprofile/*cell*
 F:     arch/powerpc/platforms/cell/
 
 CEPH DISTRIBUTED FILE SYSTEM CLIENT
-M:     Sage Weil <sage@inktank.com>
+M:     Yan, Zheng <zyan@redhat.com>
+M:     Sage Weil <sage@redhat.com>
 L:     ceph-devel@vger.kernel.org
 W:     http://ceph.com/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
@@ -3936,7 +3937,7 @@ S:        Maintained
 F:     drivers/staging/fbtft/
 
 FCOE SUBSYSTEM (libfc, libfcoe, fcoe)
-M:     Robert Love <robert.w.love@intel.com>
+M:     Vasu Dev <vasu.dev@intel.com>
 L:     fcoe-devel@open-fcoe.org
 W:     www.Open-FCoE.org
 S:     Supported
@@ -7998,8 +7999,8 @@ S:        Supported
 F:     drivers/net/wireless/ath/wcn36xx/
 
 RADOS BLOCK DEVICE (RBD)
-M:     Yehuda Sadeh <yehuda@inktank.com>
-M:     Sage Weil <sage@inktank.com>
+M:     Ilya Dryomov <idryomov@gmail.com>
+M:     Sage Weil <sage@redhat.com>
 M:     Alex Elder <elder@kernel.org>
 M:     ceph-devel@vger.kernel.org
 W:     http://ceph.com/
@@ -8566,7 +8567,7 @@ S:        Maintained
 F:     drivers/scsi/sr*
 
 SCSI RDMA PROTOCOL (SRP) INITIATOR
-M:     Bart Van Assche <bvanassche@acm.org>
+M:     Bart Van Assche <bart.vanassche@sandisk.com>
 L:     linux-rdma@vger.kernel.org
 S:     Supported
 W:     http://www.openfabrics.org
@@ -9718,6 +9719,11 @@ L:       linux-omap@vger.kernel.org
 S:     Maintained
 F:     drivers/thermal/ti-soc-thermal/
 
+TI CDCE706 CLOCK DRIVER
+M:     Max Filippov <jcmvbkbc@gmail.com>
+S:     Maintained
+F:     drivers/clk/clk-cdce706.c
+
 TI CLOCK DRIVER
 M:     Tero Kristo <t-kristo@ti.com>
 L:     linux-omap@vger.kernel.org
index dd8796caa23982428676d25ed78967d3b2d0e069..9fab639727c78e5370538afcd6980f167af6fdf6 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
-VERSION = 3
-PATCHLEVEL = 19
+VERSION = 4
+PATCHLEVEL = 0
 SUBLEVEL = 0
-EXTRAVERSION =
-NAME = Diseased Newt
+EXTRAVERSION = -rc1
+NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
@@ -502,7 +502,7 @@ endif
 ifeq ($(KBUILD_EXTMOD),)
         ifneq ($(filter config %config,$(MAKECMDGOALS)),)
                 config-targets := 1
-                ifneq ($(filter-out config %config,$(MAKECMDGOALS)),)
+                ifneq ($(words $(MAKECMDGOALS)),1)
                         mixed-targets := 1
                 endif
         endif
@@ -1180,7 +1180,7 @@ CLEAN_DIRS  += $(MODVERDIR)
 # Directories & files removed with 'make mrproper'
 MRPROPER_DIRS  += include/config usr/include include/generated          \
                  arch/*/include/generated .tmp_objdiff
-MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
+MRPROPER_FILES += .config .config.old .version .old_version \
                  Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
                  signing_key.priv signing_key.x509 x509.genkey         \
                  extra_certificates signing_key.x509.keyid             \
index 5126f9e77a9883ceb4f8af0f24a46d17096e084c..ff5fb6ab0b9748dbecd27fd7432c4f8306f5221d 100644 (file)
                };
        };
 
+       i2c0: i2c@18008000 {
+               compatible = "brcm,cygnus-iproc-i2c", "brcm,iproc-i2c";
+               reg = <0x18008000 0x100>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+               clock-frequency = <100000>;
+               status = "disabled";
+       };
+
+       i2c1: i2c@1800b000 {
+               compatible = "brcm,cygnus-iproc-i2c", "brcm,iproc-i2c";
+               reg = <0x1800b000 0x100>;
+               #address-cells = <1>;
+               #size-cells = <0>;
+               interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+               clock-frequency = <100000>;
+               status = "disabled";
+       };
+
        uart0: serial@18020000 {
                compatible = "snps,dw-apb-uart";
                reg = <0x18020000 0x100>;
index d2d8e94e0aa2042b80dff2d34b49bad66e16345d..f46329c8ad75c00c068e269565bdb1b931083f47 100644 (file)
@@ -66,8 +66,9 @@
                        reg = <0x1d000 0x1000>;
                        cache-unified;
                        cache-level = <2>;
-                       cache-sets = <16>;
-                       cache-size = <0x80000>;
+                       cache-size = <524288>;
+                       cache-sets = <1024>;
+                       cache-line-size = <32>;
                        interrupts = <GIC_PPI 0 IRQ_TYPE_LEVEL_HIGH>;
                };
 
index 8ca3c1a2063deeb0eb426a7b320b269f77e4b04d..5c2925831f2038318258003ae6cd3736da71c0de 100644 (file)
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc0";
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc1";
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc2";
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
                };
 
                mmc3_clk: clk@01c20094 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20094 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc3";
+                       clock-output-names = "mmc3",
+                                            "mmc3_output",
+                                            "mmc3_sample";
                };
 
                ts_clk: clk@01c20098 {
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun4i-a10-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <32>;
                        status = "disabled";
                };
                mmc1: mmc@01c10000 {
                        compatible = "allwinner,sun4i-a10-mmc";
                        reg = <0x01c10000 0x1000>;
-                       clocks = <&ahb_gates 9>, <&mmc1_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 9>,
+                                <&mmc1_clk 0>,
+                                <&mmc1_clk 1>,
+                                <&mmc1_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <33>;
                        status = "disabled";
                };
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun4i-a10-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <34>;
                        status = "disabled";
                };
                mmc3: mmc@01c12000 {
                        compatible = "allwinner,sun4i-a10-mmc";
                        reg = <0x01c12000 0x1000>;
-                       clocks = <&ahb_gates 11>, <&mmc3_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 11>,
+                                <&mmc3_clk 0>,
+                                <&mmc3_clk 1>,
+                                <&mmc3_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <35>;
                        status = "disabled";
                };
index 905f84d141f03213947def68787c42f905e2b54b..2fd8988f310c6e25dc2d215a35ed1e633d95f83a 100644 (file)
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc0";
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc1";
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc2";
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
                };
 
                ts_clk: clk@01c20098 {
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <32>;
                        status = "disabled";
                };
                mmc1: mmc@01c10000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c10000 0x1000>;
-                       clocks = <&ahb_gates 9>, <&mmc1_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 9>,
+                                <&mmc1_clk 0>,
+                                <&mmc1_clk 1>,
+                                <&mmc1_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <33>;
                        status = "disabled";
                };
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <34>;
                        status = "disabled";
                };
index 4910393d1b09013bcb929dc6325d8ddbd6a7d945..f8818f1edbbef27f16adadc139b8e46ae49823ad 100644 (file)
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc0";
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc1";
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc2";
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
                };
 
                ts_clk: clk@01c20098 {
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <32>;
                        status = "disabled";
                };
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <34>;
                        status = "disabled";
                };
index 47e557656993fe99f1ea46667a1c1d27e8a2648f..fa2f403ccf28adf4f6aa10c08978dd3b59b6e709 100644 (file)
                        clock-output-names = "axi";
                };
 
-               ahb1_mux: ahb1_mux@01c20054 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun6i-a31-ahb1-mux-clk";
-                       reg = <0x01c20054 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
-                       clock-output-names = "ahb1_mux";
-               };
-
                ahb1: ahb1@01c20054 {
                        #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-ahb-clk";
+                       compatible = "allwinner,sun6i-a31-ahb1-clk";
                        reg = <0x01c20054 0x4>;
-                       clocks = <&ahb1_mux>;
+                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
                        clock-output-names = "ahb1";
                };
 
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
                        clocks = <&osc24M>, <&pll6 0>;
-                       clock-output-names = "mmc0";
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
                        clocks = <&osc24M>, <&pll6 0>;
-                       clock-output-names = "mmc1";
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
                        clocks = <&osc24M>, <&pll6 0>;
-                       clock-output-names = "mmc2";
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
                };
 
                mmc3_clk: clk@01c20094 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20094 0x4>;
                        clocks = <&osc24M>, <&pll6 0>;
-                       clock-output-names = "mmc3";
+                       clock-output-names = "mmc3",
+                                            "mmc3_output",
+                                            "mmc3_sample";
                };
 
                spi0_clk: clk@01c200a0 {
                        #dma-cells = <1>;
 
                        /* DMA controller requires AHB1 clocked from PLL6 */
-                       assigned-clocks = <&ahb1_mux>;
+                       assigned-clocks = <&ahb1>;
                        assigned-clock-parents = <&pll6 0>;
                };
 
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb1_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 8>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
                mmc1: mmc@01c10000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c10000 0x1000>;
-                       clocks = <&ahb1_gates 9>, <&mmc1_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 9>,
+                                <&mmc1_clk 0>,
+                                <&mmc1_clk 1>,
+                                <&mmc1_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 9>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb1_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 10>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
                mmc3: mmc@01c12000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c12000 0x1000>;
-                       clocks = <&ahb1_gates 11>, <&mmc3_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 11>,
+                                <&mmc3_clk 0>,
+                                <&mmc3_clk 1>,
+                                <&mmc3_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 11>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
index 786d491542ac429567fd6db6142406858090f81c..3a8530b79f1c46200d2b7ee8bbe343198c7106d8 100644 (file)
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc0";
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc1";
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc2";
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
                };
 
                mmc3_clk: clk@01c20094 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20094 0x4>;
                        clocks = <&osc24M>, <&pll6 1>, <&pll5 1>;
-                       clock-output-names = "mmc3";
+                       clock-output-names = "mmc3",
+                                            "mmc3_output",
+                                            "mmc3_sample";
                };
 
                ts_clk: clk@01c20098 {
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
                mmc1: mmc@01c10000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c10000 0x1000>;
-                       clocks = <&ahb_gates 9>, <&mmc1_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 9>,
+                                <&mmc1_clk 0>,
+                                <&mmc1_clk 1>,
+                                <&mmc1_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
                mmc3: mmc@01c12000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c12000 0x1000>;
-                       clocks = <&ahb_gates 11>, <&mmc3_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb_gates 11>,
+                                <&mmc3_clk 0>,
+                                <&mmc3_clk 1>,
+                                <&mmc3_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
                        status = "disabled";
                };
index dd34527293e465d4739cf4518bbd8b292a61989b..382ebd137ee4fbe97514362eee3f336d4e253047 100644 (file)
                };
 
                /* dummy clock until actually implemented */
-               pll6: pll6_clk {
+               pll5: pll5_clk {
                        #clock-cells = <0>;
                        compatible = "fixed-clock";
-                       clock-frequency = <600000000>;
-                       clock-output-names = "pll6";
+                       clock-frequency = <0>;
+                       clock-output-names = "pll5";
+               };
+
+               pll6: clk@01c20028 {
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun6i-a31-pll6-clk";
+                       reg = <0x01c20028 0x4>;
+                       clocks = <&osc24M>;
+                       clock-output-names = "pll6", "pll6x2";
                };
 
                cpu: cpu_clk@01c20050 {
                        clock-output-names = "axi";
                };
 
-               ahb1_mux: ahb1_mux_clk@01c20054 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun6i-a31-ahb1-mux-clk";
-                       reg = <0x01c20054 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6>;
-                       clock-output-names = "ahb1_mux";
-               };
-
                ahb1: ahb1_clk@01c20054 {
                        #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-ahb-clk";
+                       compatible = "allwinner,sun6i-a31-ahb1-clk";
                        reg = <0x01c20054 0x4>;
-                       clocks = <&ahb1_mux>;
+                       clocks = <&osc32k>, <&osc24M>, <&axi>, <&pll6 0>;
                        clock-output-names = "ahb1";
                };
 
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-apb1-clk";
                        reg = <0x01c20058 0x4>;
-                       clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>;
+                       clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
                        clock-output-names = "apb2";
                };
 
                };
 
                mmc0_clk: clk@01c20088 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20088 0x4>;
-                       clocks = <&osc24M>, <&pll6>;
-                       clock-output-names = "mmc0";
+                       clocks = <&osc24M>, <&pll6 0>;
+                       clock-output-names = "mmc0",
+                                            "mmc0_output",
+                                            "mmc0_sample";
                };
 
                mmc1_clk: clk@01c2008c {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c2008c 0x4>;
-                       clocks = <&osc24M>, <&pll6>;
-                       clock-output-names = "mmc1";
+                       clocks = <&osc24M>, <&pll6 0>;
+                       clock-output-names = "mmc1",
+                                            "mmc1_output",
+                                            "mmc1_sample";
                };
 
                mmc2_clk: clk@01c20090 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-mod0-clk";
+                       #clock-cells = <1>;
+                       compatible = "allwinner,sun4i-a10-mmc-clk";
                        reg = <0x01c20090 0x4>;
-                       clocks = <&osc24M>, <&pll6>;
-                       clock-output-names = "mmc2";
+                       clocks = <&osc24M>, <&pll6 0>;
+                       clock-output-names = "mmc2",
+                                            "mmc2_output",
+                                            "mmc2_sample";
+               };
+
+               mbus_clk: clk@01c2015c {
+                       #clock-cells = <0>;
+                       compatible = "allwinner,sun8i-a23-mbus-clk";
+                       reg = <0x01c2015c 0x4>;
+                       clocks = <&osc24M>, <&pll6 1>, <&pll5>;
+                       clock-output-names = "mbus";
                };
        };
 
                mmc0: mmc@01c0f000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c0f000 0x1000>;
-                       clocks = <&ahb1_gates 8>, <&mmc0_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 8>,
+                                <&mmc0_clk 0>,
+                                <&mmc0_clk 1>,
+                                <&mmc0_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 8>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
                mmc1: mmc@01c10000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c10000 0x1000>;
-                       clocks = <&ahb1_gates 9>, <&mmc1_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 9>,
+                                <&mmc1_clk 0>,
+                                <&mmc1_clk 1>,
+                                <&mmc1_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 9>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 61 IRQ_TYPE_LEVEL_HIGH>;
                mmc2: mmc@01c11000 {
                        compatible = "allwinner,sun5i-a13-mmc";
                        reg = <0x01c11000 0x1000>;
-                       clocks = <&ahb1_gates 10>, <&mmc2_clk>;
-                       clock-names = "ahb", "mmc";
+                       clocks = <&ahb1_gates 10>,
+                                <&mmc2_clk 0>,
+                                <&mmc2_clk 1>,
+                                <&mmc2_clk 2>;
+                       clock-names = "ahb",
+                                     "mmc",
+                                     "output",
+                                     "sample";
                        resets = <&ahb1_rst 10>;
                        reset-names = "ahb";
                        interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
index dd9acc95ebc0ef3df443ac9e48b3aa4b06d7b73b..61b53c46edfa7556827ae552082e79ece19df909 100644 (file)
@@ -231,7 +231,7 @@ static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 /*
  * PMU platform driver and devicetree bindings.
  */
-static struct of_device_id cpu_pmu_of_device_ids[] = {
+static const struct of_device_id cpu_pmu_of_device_ids[] = {
        {.compatible = "arm,cortex-a17-pmu",    .data = armv7_a17_pmu_init},
        {.compatible = "arm,cortex-a15-pmu",    .data = armv7_a15_pmu_init},
        {.compatible = "arm,cortex-a12-pmu",    .data = armv7_a12_pmu_init},
index c6740e359a44e253abea81a8fac9549e7994804f..c74a44324e5bc3dc7cb4b5e0f31e64ca8c167f5f 100644 (file)
@@ -64,7 +64,6 @@ config SOC_SAMA5D4
        select SOC_SAMA5
        select CLKSRC_MMIO
        select CACHE_L2X0
-       select CACHE_PL310
        select HAVE_FB_ATMEL
        select HAVE_AT91_UTMI
        select HAVE_AT91_SMD
index 51761f8927b7a4959468ad0df8c4ddf86565303a..b00d09555f2b7662cb3262ace823a11ccb3fd5e5 100644 (file)
@@ -183,7 +183,7 @@ static struct clock_event_device clkevt = {
 void __iomem *at91_st_base;
 EXPORT_SYMBOL_GPL(at91_st_base);
 
-static struct of_device_id at91rm9200_st_timer_ids[] = {
+static const struct of_device_id at91rm9200_st_timer_ids[] = {
        { .compatible = "atmel,at91rm9200-st" },
        { /* sentinel */ }
 };
index a6e726a6e0b578dc0c6e6dfaa2c996d3baaa59bf..583369ffc284d5588b4c7f4c742cedbefb7614f3 100644 (file)
@@ -35,10 +35,10 @@ extern void __init at91sam9260_pm_init(void);
 extern void __init at91sam9g45_pm_init(void);
 extern void __init at91sam9x5_pm_init(void);
 #else
-void __init at91rm9200_pm_init(void) { }
-void __init at91sam9260_pm_init(void) { }
-void __init at91sam9g45_pm_init(void) { }
-void __init at91sam9x5_pm_init(void) { }
+static inline void __init at91rm9200_pm_init(void) { }
+static inline void __init at91sam9260_pm_init(void) { }
+static inline void __init at91sam9g45_pm_init(void) { }
+static inline void __init at91sam9x5_pm_init(void) { }
 #endif
 
 #endif /* _AT91_GENERIC_H */
index af8d8afc2e12f05d34a395f10408acdec2d8917f..5e34fb1433098aee3916f2b4c3019a14d2aa5ba8 100644 (file)
@@ -226,7 +226,7 @@ void at91_pm_set_standby(void (*at91_standby)(void))
        }
 }
 
-static struct of_device_id ramc_ids[] = {
+static const struct of_device_id ramc_ids[] __initconst = {
        { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby },
        { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby },
        { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby },
@@ -234,7 +234,7 @@ static struct of_device_id ramc_ids[] = {
        { /*sentinel*/ }
 };
 
-static void at91_dt_ramc(void)
+static __init void at91_dt_ramc(void)
 {
        struct device_node *np;
        const struct of_device_id *of_id;
index 19e5a1d9539751c8fba373a88e3f31f558ffd773..4db76a493c5a68e066aa9f664112570dddef5cf3 100644 (file)
@@ -16,7 +16,7 @@
 #include <linux/init.h>
 #include <asm/mach/arch.h>
 
-static const char *axxia_dt_match[] __initconst = {
+static const char *const axxia_dt_match[] __initconst = {
        "lsi,axm5516",
        "lsi,axm5516-sim",
        "lsi,axm5516-emu",
index aaeec78c3ec4d05df74531a36245d21c4b9e2dd8..8b11f44bb36e5a3dcfe59cf331e18730e71c9ec5 100644 (file)
@@ -68,7 +68,7 @@ config ARCH_BCM_MOBILE
          This enables support for systems based on Broadcom mobile SoCs.
 
 config ARCH_BCM_281XX
-       bool "Broadcom BCM281XX SoC family"
+       bool "Broadcom BCM281XX SoC family" if ARCH_MULTI_V7
        select ARCH_BCM_MOBILE
        select HAVE_SMP
        help
@@ -77,7 +77,7 @@ config ARCH_BCM_281XX
          variants.
 
 config ARCH_BCM_21664
-       bool "Broadcom BCM21664 SoC family"
+       bool "Broadcom BCM21664 SoC family" if ARCH_MULTI_V7
        select ARCH_BCM_MOBILE
        select HAVE_SMP
        help
index 60a5afa06ed7f90428729c2daa1ef59beeeb1c0d..3a60f7ee3f0cc1583788f9cd3da81a5723354647 100644 (file)
@@ -17,7 +17,7 @@
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 
-static const char *brcmstb_match[] __initconst = {
+static const char *const brcmstb_match[] __initconst = {
        "brcm,bcm7445",
        "brcm,brcmstb",
        NULL
index 584e8d4e28926956bed6971713d8f2f3758f59a7..cd30f6f5f2ff15723a2abae07b471b03f65f9b11 100644 (file)
@@ -32,12 +32,14 @@ config ARCH_DAVINCI_DM646x
 
 config ARCH_DAVINCI_DA830
        bool "DA830/OMAP-L137/AM17x based system"
+       depends on !ARCH_DAVINCI_DMx || AUTO_ZRELADDR
        select ARCH_DAVINCI_DA8XX
        select CPU_DCACHE_WRITETHROUGH # needed on silicon revs 1.0, 1.1
        select CP_INTC
 
 config ARCH_DAVINCI_DA850
        bool "DA850/OMAP-L138/AM18x based system"
+       depends on !ARCH_DAVINCI_DMx || AUTO_ZRELADDR
        select ARCH_DAVINCI_DA8XX
        select CP_INTC
 
index f703d82f08a80d22adc3e8a1f45acbd7fbfa846c..438f68547f4c79ea4a12b0bf82bd85583c02be62 100644 (file)
@@ -20,7 +20,7 @@
 
 #define DA8XX_NUM_UARTS        3
 
-static struct of_device_id da8xx_irq_match[] __initdata = {
+static const struct of_device_id da8xx_irq_match[] __initconst = {
        { .compatible = "ti,cp-intc", .data = cp_intc_of_init, },
        { }
 };
index a8eb909a2b6ccd94dd7f66c0c11370afc9503184..6a2ff0a654a5b53efce08a40c60a1d25f603b166 100644 (file)
@@ -30,7 +30,7 @@ static void __iomem *pinmux_base;
 /*
  * Sets the DAVINCI MUX register based on the table
  */
-int __init_or_module davinci_cfg_reg(const unsigned long index)
+int davinci_cfg_reg(const unsigned long index)
 {
        static DEFINE_SPINLOCK(mux_spin_lock);
        struct davinci_soc_info *soc_info = &davinci_soc_info;
@@ -101,7 +101,7 @@ int __init_or_module davinci_cfg_reg(const unsigned long index)
 }
 EXPORT_SYMBOL(davinci_cfg_reg);
 
-int __init_or_module davinci_cfg_reg_list(const short pins[])
+int davinci_cfg_reg_list(const short pins[])
 {
        int i, error = -EINVAL;
 
index 2013f73797ed6c24d8c6f5f5cd850b96cb1f9869..9e9dfdfad9d77fd670fd186d61907f503815cde3 100644 (file)
@@ -227,7 +227,7 @@ static void __init exynos_dt_machine_init(void)
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
 
-static char const *exynos_dt_compat[] __initconst = {
+static char const *const exynos_dt_compat[] __initconst = {
        "samsung,exynos3",
        "samsung,exynos3250",
        "samsung,exynos4",
index 666ec3e5b03fbc05592bf66d82b4bd8d311c41de..52e2b1a2fddbfcf7485d1c504c4e66cec3609388 100644 (file)
@@ -587,7 +587,7 @@ static struct exynos_pm_data exynos5420_pm_data = {
        .cpu_suspend    = exynos5420_cpu_suspend,
 };
 
-static struct of_device_id exynos_pmu_of_device_ids[] = {
+static const struct of_device_id exynos_pmu_of_device_ids[] __initconst = {
        {
                .compatible = "samsung,exynos3250-pmu",
                .data = &exynos3250_pm_data,
index 07a09570175d8b034082a3639ddfa01999bf59d3..231fba0d03e5135466aa55bd765f684b8900084e 100644 (file)
@@ -169,7 +169,7 @@ static void __init highbank_init(void)
                platform_device_register(&highbank_cpuidle_device);
 }
 
-static const char *highbank_match[] __initconst = {
+static const char *const highbank_match[] __initconst = {
        "calxeda,highbank",
        "calxeda,ecx-2000",
        NULL,
index 76b907078b58d365a1acaa73ac85e98e9862833f..c6bd7c7bd4aa4949203505247f0fd9db21ea0c5b 100644 (file)
@@ -45,7 +45,7 @@ static void __init hi3620_map_io(void)
        iotable_init(hi3620_io_desc, ARRAY_SIZE(hi3620_io_desc));
 }
 
-static const char *hi3xxx_compat[] __initconst = {
+static const char *const hi3xxx_compat[] __initconst = {
        "hisilicon,hi3620-hi4511",
        NULL,
 };
@@ -55,7 +55,7 @@ DT_MACHINE_START(HI3620, "Hisilicon Hi3620 (Flattened Device Tree)")
        .dt_compat      = hi3xxx_compat,
 MACHINE_END
 
-static const char *hix5hd2_compat[] __initconst = {
+static const char *const hix5hd2_compat[] __initconst = {
        "hisilicon,hix5hd2",
        NULL,
 };
@@ -64,7 +64,7 @@ DT_MACHINE_START(HIX5HD2_DT, "Hisilicon HIX5HD2 (Flattened Device Tree)")
        .dt_compat      = hix5hd2_compat,
 MACHINE_END
 
-static const char *hip04_compat[] __initconst = {
+static const char *const hip04_compat[] __initconst = {
        "hisilicon,hip04-d01",
        NULL,
 };
@@ -73,7 +73,7 @@ DT_MACHINE_START(HIP04, "Hisilicon HiP04 (Flattened Device Tree)")
        .dt_compat      = hip04_compat,
 MACHINE_END
 
-static const char *hip01_compat[] __initconst = {
+static const char *const hip01_compat[] __initconst = {
        "hisilicon,hip01",
        "hisilicon,hip01-ca9x2",
        NULL,
index a377f95033aecc6edaab43a7d012df3997b27017..0411f0664c15c0bce0469b9cd073205834ba366a 100644 (file)
@@ -68,7 +68,7 @@ int imx_mmdc_get_ddr_type(void)
        return ddr_type;
 }
 
-static struct of_device_id imx_mmdc_dt_ids[] = {
+static const struct of_device_id imx_mmdc_dt_ids[] = {
        { .compatible = "fsl,imx6q-mmdc", },
        { /* sentinel */ }
 };
index 6a722860e34dc2a4fb0d79f095656067d191bd4a..b024390199639ca31201730ccdbcfceae577be2b 100644 (file)
@@ -245,8 +245,10 @@ static inline void outb(u8 value, u32 addr)
 }
 
 #define outsb outsb
-static inline void outsb(u32 io_addr, const u8 *vaddr, u32 count)
+static inline void outsb(u32 io_addr, const void *p, u32 count)
 {
+       const u8 *vaddr = p;
+
        while (count--)
                outb(*vaddr++, io_addr);
 }
@@ -262,8 +264,9 @@ static inline void outw(u16 value, u32 addr)
 }
 
 #define outsw outsw
-static inline void outsw(u32 io_addr, const u16 *vaddr, u32 count)
+static inline void outsw(u32 io_addr, const void *p, u32 count)
 {
+       const u16 *vaddr = p;
        while (count--)
                outw(cpu_to_le16(*vaddr++), io_addr);
 }
@@ -275,8 +278,9 @@ static inline void outl(u32 value, u32 addr)
 }
 
 #define outsl outsl
-static inline void outsl(u32 io_addr, const u32 *vaddr, u32 count)
+static inline void outsl(u32 io_addr, const void *p, u32 count)
 {
+       const u32 *vaddr = p;
        while (count--)
                outl(cpu_to_le32(*vaddr++), io_addr);
 }
@@ -294,8 +298,9 @@ static inline u8 inb(u32 addr)
 }
 
 #define insb insb
-static inline void insb(u32 io_addr, u8 *vaddr, u32 count)
+static inline void insb(u32 io_addr, void *p, u32 count)
 {
+       u8 *vaddr = p;
        while (count--)
                *vaddr++ = inb(io_addr);
 }
@@ -313,8 +318,9 @@ static inline u16 inw(u32 addr)
 }
 
 #define insw insw
-static inline void insw(u32 io_addr, u16 *vaddr, u32 count)
+static inline void insw(u32 io_addr, void *p, u32 count)
 {
+       u16 *vaddr = p;
        while (count--)
                *vaddr++ = le16_to_cpu(inw(io_addr));
 }
@@ -330,8 +336,9 @@ static inline u32 inl(u32 addr)
 }
 
 #define insl insl
-static inline void insl(u32 io_addr, u32 *vaddr, u32 count)
+static inline void insl(u32 io_addr, void *p, u32 count)
 {
+       u32 *vaddr = p;
        while (count--)
                *vaddr++ = le32_to_cpu(inl(io_addr));
 }
index 7f352de2609909ad6c5087aaa203f744876fd4b7..06620875813ae93c76dee1628c94d5439e94783e 100644 (file)
@@ -103,7 +103,7 @@ static void __init keystone_init_meminfo(void)
        pr_info("Switching to high address space at 0x%llx\n", (u64)offset);
 }
 
-static const char *keystone_match[] __initconst = {
+static const char *const keystone_match[] __initconst = {
        "ti,keystone",
        NULL,
 };
index ef6041e7e6754c5daf8c07d7f91309f32ce4f4b1..41bebfd296dcbacac7a9d0c60747d5b1c01c73d4 100644 (file)
@@ -61,7 +61,7 @@ static struct pm_clk_notifier_block platform_domain_notifier = {
        .pm_domain = &keystone_pm_domain,
 };
 
-static struct of_device_id of_keystone_table[] = {
+static const struct of_device_id of_keystone_table[] = {
        {.compatible = "ti,keystone"},
        { /* end of list */ },
 };
index 2756351dbb35acac76190f3342a92e006d77c9d6..10bfa03e58d4777a03377391a63f8a69ec2e3c78 100644 (file)
@@ -213,7 +213,7 @@ void __init timer_init(int irq)
 }
 
 #ifdef CONFIG_OF
-static struct of_device_id mmp_timer_dt_ids[] = {
+static const struct of_device_id mmp_timer_dt_ids[] = {
        { .compatible = "mrvl,mmp-timer", },
        {}
 };
index b5895f040caaf73a9f304f91c0fca2533a9dcc86..e46e9ea1e187ecfbbeee56c68a882c68076f28b5 100644 (file)
@@ -51,7 +51,7 @@ enum {
        COHERENCY_FABRIC_TYPE_ARMADA_380,
 };
 
-static struct of_device_id of_coherency_table[] = {
+static const struct of_device_id of_coherency_table[] = {
        {.compatible = "marvell,coherency-fabric",
         .data = (void *) COHERENCY_FABRIC_TYPE_ARMADA_370_XP },
        {.compatible = "marvell,armada-375-coherency-fabric",
index d8ab605a44fa7f80a87edbd2ffeadd386903ea16..8b9f5e202ccf67d34681a2e1966fc424680a417f 100644 (file)
@@ -104,7 +104,7 @@ static void __iomem *pmsu_mp_base;
 
 static void *mvebu_cpu_resume;
 
-static struct of_device_id of_pmsu_table[] = {
+static const struct of_device_id of_pmsu_table[] = {
        { .compatible = "marvell,armada-370-pmsu", },
        { .compatible = "marvell,armada-370-xp-pmsu", },
        { .compatible = "marvell,armada-380-pmsu", },
index a068cb5c2ce809c285d254a3f6fb924bb9f4ed30..c6c132acd7a61a052e27dce226d5b8bb62648307 100644 (file)
@@ -126,7 +126,7 @@ int mvebu_system_controller_get_soc_id(u32 *dev, u32 *rev)
                return -ENODEV;
 }
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && defined(CONFIG_MACH_MVEBU_V7)
 void mvebu_armada375_smp_wa_init(void)
 {
        u32 dev, rev;
index 3d24ebf120953d936fa5785fdcd199fcfbf8f1ba..3445a5686805e082d6464aa35df2bb0389b3e276 100644 (file)
@@ -27,7 +27,7 @@
 #include "mmio.h"
 #include "clcd.h"
 
-static const char *nspire_dt_match[] __initconst = {
+static const char *const nspire_dt_match[] __initconst = {
        "ti,nspire",
        "ti,nspire-cx",
        "ti,nspire-tp",
index 00d5d8f9f150ed8e8a475c9103bdc74da2f3cdf1..b83f18fcec9b3c693944a6a2d0aafccf8e660a75 100644 (file)
@@ -190,7 +190,7 @@ obj-$(CONFIG_SOC_OMAP2430)          += clock2430.o
 obj-$(CONFIG_ARCH_OMAP3)               += $(clock-common) clock3xxx.o
 obj-$(CONFIG_ARCH_OMAP3)               += clock34xx.o clkt34xx_dpll3m2.o
 obj-$(CONFIG_ARCH_OMAP3)               += clock3517.o clock36xx.o
-obj-$(CONFIG_ARCH_OMAP3)               += dpll3xxx.o cclock3xxx_data.o
+obj-$(CONFIG_ARCH_OMAP3)               += dpll3xxx.o
 obj-$(CONFIG_ARCH_OMAP3)               += clkt_iclk.o
 obj-$(CONFIG_ARCH_OMAP4)               += $(clock-common)
 obj-$(CONFIG_ARCH_OMAP4)               += dpll3xxx.o dpll44xx.o
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
deleted file mode 100644 (file)
index e79c80b..0000000
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ /dev/null
@@ -1,3688 +0,0 @@
-/*
- * OMAP3 clock data
- *
- * Copyright (C) 2007-2012 Texas Instruments, Inc.
- * Copyright (C) 2007-2011 Nokia Corporation
- *
- * Written by Paul Walmsley
- * Updated to COMMON clk data format by Rajendra Nayak <rnayak@ti.com>
- * With many device clock fixes by Kevin Hilman and Jouni Högander
- * DPLL bypass clock support added by Roman Tereshonkov
- *
- */
-
-/*
- * Virtual clocks are introduced as convenient tools.
- * They are sources for other clocks and not supposed
- * to be requested from drivers directly.
- */
-
-#include <linux/kernel.h>
-#include <linux/clk.h>
-#include <linux/clk-private.h>
-#include <linux/list.h>
-#include <linux/io.h>
-
-#include "soc.h"
-#include "iomap.h"
-#include "clock.h"
-#include "clock3xxx.h"
-#include "clock34xx.h"
-#include "clock36xx.h"
-#include "clock3517.h"
-#include "cm3xxx.h"
-#include "cm-regbits-34xx.h"
-#include "prm3xxx.h"
-#include "prm-regbits-34xx.h"
-#include "control.h"
-
-/*
- * clocks
- */
-
-#define OMAP_CM_REGADDR                OMAP34XX_CM_REGADDR
-
-/* Maximum DPLL multiplier, divider values for OMAP3 */
-#define OMAP3_MAX_DPLL_MULT            2047
-#define OMAP3630_MAX_JTYPE_DPLL_MULT   4095
-#define OMAP3_MAX_DPLL_DIV             128
-
-DEFINE_CLK_FIXED_RATE(dummy_apb_pclk, CLK_IS_ROOT, 0x0, 0x0);
-
-DEFINE_CLK_FIXED_RATE(mcbsp_clks, CLK_IS_ROOT, 0x0, 0x0);
-
-DEFINE_CLK_FIXED_RATE(omap_32k_fck, CLK_IS_ROOT, 32768, 0x0);
-
-DEFINE_CLK_FIXED_RATE(pclk_ck, CLK_IS_ROOT, 27000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(rmii_ck, CLK_IS_ROOT, 50000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(secure_32k_fck, CLK_IS_ROOT, 32768, 0x0);
-
-DEFINE_CLK_FIXED_RATE(sys_altclk, CLK_IS_ROOT, 0x0, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_12m_ck, CLK_IS_ROOT, 12000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_13m_ck, CLK_IS_ROOT, 13000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_16_8m_ck, CLK_IS_ROOT, 16800000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_19200000_ck, CLK_IS_ROOT, 19200000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_26000000_ck, CLK_IS_ROOT, 26000000, 0x0);
-
-DEFINE_CLK_FIXED_RATE(virt_38_4m_ck, CLK_IS_ROOT, 38400000, 0x0);
-
-static const char *osc_sys_ck_parent_names[] = {
-       "virt_12m_ck", "virt_13m_ck", "virt_19200000_ck", "virt_26000000_ck",
-       "virt_38_4m_ck", "virt_16_8m_ck",
-};
-
-DEFINE_CLK_MUX(osc_sys_ck, osc_sys_ck_parent_names, NULL, 0x0,
-              OMAP3430_PRM_CLKSEL, OMAP3430_SYS_CLKIN_SEL_SHIFT,
-              OMAP3430_SYS_CLKIN_SEL_WIDTH, 0x0, NULL);
-
-DEFINE_CLK_DIVIDER(sys_ck, "osc_sys_ck", &osc_sys_ck, 0x0,
-                  OMAP3430_PRM_CLKSRC_CTRL, OMAP_SYSCLKDIV_SHIFT,
-                  OMAP_SYSCLKDIV_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct dpll_data dpll3_dd = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-       .mult_mask      = OMAP3430_CORE_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430_CORE_DPLL_DIV_MASK,
-       .clk_bypass     = &sys_ck,
-       .clk_ref        = &sys_ck,
-       .freqsel_mask   = OMAP3430_CORE_DPLL_FREQSEL_MASK,
-       .control_reg    = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_mask    = OMAP3430_EN_CORE_DPLL_MASK,
-       .auto_recal_bit = OMAP3430_EN_CORE_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430_CORE_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430_CORE_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
-       .autoidle_mask  = OMAP3430_AUTO_CORE_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
-       .idlest_mask    = OMAP3430_ST_CORE_CLK_MASK,
-       .max_multiplier = OMAP3_MAX_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll3_ck;
-
-static const char *dpll3_ck_parent_names[] = {
-       "sys_ck",
-       "sys_ck",
-};
-
-static const struct clk_ops dpll3_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .get_parent     = &omap2_init_dpll_parent,
-       .recalc_rate    = &omap3_dpll_recalc,
-       .round_rate     = &omap2_dpll_round_rate,
-};
-
-static struct clk_hw_omap dpll3_ck_hw = {
-       .hw = {
-               .clk = &dpll3_ck,
-       },
-       .ops            = &clkhwops_omap3_dpll,
-       .dpll_data      = &dpll3_dd,
-       .clkdm_name     = "dpll3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll3_ck, dpll3_ck_parent_names, dpll3_ck_ops);
-
-DEFINE_CLK_DIVIDER(dpll3_m2_ck, "dpll3_ck", &dpll3_ck, 0x0,
-                  OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-                  OMAP3430_CORE_DPLL_CLKOUT_DIV_SHIFT,
-                  OMAP3430_CORE_DPLL_CLKOUT_DIV_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk core_ck;
-
-static const char *core_ck_parent_names[] = {
-       "dpll3_m2_ck",
-};
-
-static const struct clk_ops core_ck_ops = {};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_ck, NULL);
-DEFINE_STRUCT_CLK(core_ck, core_ck_parent_names, core_ck_ops);
-
-DEFINE_CLK_DIVIDER(l3_ick, "core_ck", &core_ck, 0x0,
-                  OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_L3_SHIFT, OMAP3430_CLKSEL_L3_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(l4_ick, "l3_ick", &l3_ick, 0x0,
-                  OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_L4_SHIFT, OMAP3430_CLKSEL_L4_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk security_l4_ick2;
-
-static const char *security_l4_ick2_parent_names[] = {
-       "l4_ick",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(security_l4_ick2, NULL);
-DEFINE_STRUCT_CLK(security_l4_ick2, security_l4_ick2_parent_names, core_ck_ops);
-
-static struct clk aes1_ick;
-
-static const char *aes1_ick_parent_names[] = {
-       "security_l4_ick2",
-};
-
-static const struct clk_ops aes1_ick_ops = {
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-};
-
-static struct clk_hw_omap aes1_ick_hw = {
-       .hw = {
-               .clk = &aes1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-       .enable_bit     = OMAP3430_EN_AES1_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(aes1_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk core_l4_ick;
-
-static const struct clk_ops core_l4_ick_ops = {
-       .init           = &omap2_init_clk_clkdm,
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_l4_ick, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk aes2_ick;
-
-static const char *aes2_ick_parent_names[] = {
-       "core_l4_ick",
-};
-
-static const struct clk_ops aes2_ick_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-};
-
-static struct clk_hw_omap aes2_ick_hw = {
-       .hw = {
-               .clk = &aes2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_AES2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(aes2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk dpll1_fck;
-
-static struct dpll_data dpll1_dd = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
-       .mult_mask      = OMAP3430_MPU_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430_MPU_DPLL_DIV_MASK,
-       .clk_bypass     = &dpll1_fck,
-       .clk_ref        = &sys_ck,
-       .freqsel_mask   = OMAP3430_MPU_DPLL_FREQSEL_MASK,
-       .control_reg    = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKEN_PLL),
-       .enable_mask    = OMAP3430_EN_MPU_DPLL_MASK,
-       .modes          = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
-       .auto_recal_bit = OMAP3430_EN_MPU_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430_MPU_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430_MPU_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_AUTOIDLE_PLL),
-       .autoidle_mask  = OMAP3430_AUTO_MPU_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
-       .idlest_mask    = OMAP3430_ST_MPU_CLK_MASK,
-       .max_multiplier = OMAP3_MAX_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll1_ck;
-
-static const struct clk_ops dpll1_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap3_noncore_dpll_enable,
-       .disable        = &omap3_noncore_dpll_disable,
-       .get_parent     = &omap2_init_dpll_parent,
-       .recalc_rate    = &omap3_dpll_recalc,
-       .set_rate       = &omap3_noncore_dpll_set_rate,
-       .set_parent     = &omap3_noncore_dpll_set_parent,
-       .set_rate_and_parent    = &omap3_noncore_dpll_set_rate_and_parent,
-       .determine_rate = &omap3_noncore_dpll_determine_rate,
-       .round_rate     = &omap2_dpll_round_rate,
-};
-
-static struct clk_hw_omap dpll1_ck_hw = {
-       .hw = {
-               .clk = &dpll1_ck,
-       },
-       .ops            = &clkhwops_omap3_dpll,
-       .dpll_data      = &dpll1_dd,
-       .clkdm_name     = "dpll1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll1_ck, dpll3_ck_parent_names, dpll1_ck_ops);
-
-DEFINE_CLK_FIXED_FACTOR(dpll1_x2_ck, "dpll1_ck", &dpll1_ck, 0x0, 2, 1);
-
-DEFINE_CLK_DIVIDER(dpll1_x2m2_ck, "dpll1_x2_ck", &dpll1_x2_ck, 0x0,
-                  OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL2_PLL),
-                  OMAP3430_MPU_DPLL_CLKOUT_DIV_SHIFT,
-                  OMAP3430_MPU_DPLL_CLKOUT_DIV_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk mpu_ck;
-
-static const char *mpu_ck_parent_names[] = {
-       "dpll1_x2m2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(mpu_ck, "mpu_clkdm");
-DEFINE_STRUCT_CLK(mpu_ck, mpu_ck_parent_names, core_l4_ick_ops);
-
-DEFINE_CLK_DIVIDER(arm_fck, "mpu_ck", &mpu_ck, 0x0,
-                  OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_IDLEST_PLL),
-                  OMAP3430_ST_MPU_CLK_SHIFT, OMAP3430_ST_MPU_CLK_WIDTH,
-                  0x0, NULL);
-
-static struct clk cam_ick;
-
-static struct clk_hw_omap cam_ick_hw = {
-       .hw = {
-               .clk = &cam_ick,
-       },
-       .ops            = &clkhwops_iclk,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_CAM_SHIFT,
-       .clkdm_name     = "cam_clkdm",
-};
-
-DEFINE_STRUCT_CLK(cam_ick, security_l4_ick2_parent_names, aes2_ick_ops);
-
-/* DPLL4 */
-/* Supplies 96MHz, 54Mhz TV DAC, DSS fclk, CAM sensor clock, emul trace clk */
-/* Type: DPLL */
-static struct dpll_data dpll4_dd;
-
-static struct dpll_data dpll4_dd_34xx __initdata = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
-       .mult_mask      = OMAP3430_PERIPH_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430_PERIPH_DPLL_DIV_MASK,
-       .clk_bypass     = &sys_ck,
-       .clk_ref        = &sys_ck,
-       .freqsel_mask   = OMAP3430_PERIPH_DPLL_FREQSEL_MASK,
-       .control_reg    = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_mask    = OMAP3430_EN_PERIPH_DPLL_MASK,
-       .modes          = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
-       .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430_PERIPH_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
-       .autoidle_mask  = OMAP3430_AUTO_PERIPH_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
-       .idlest_mask    = OMAP3430_ST_PERIPH_CLK_MASK,
-       .max_multiplier = OMAP3_MAX_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct dpll_data dpll4_dd_3630 __initdata = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL2),
-       .mult_mask      = OMAP3630_PERIPH_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430_PERIPH_DPLL_DIV_MASK,
-       .clk_bypass     = &sys_ck,
-       .clk_ref        = &sys_ck,
-       .control_reg    = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_mask    = OMAP3430_EN_PERIPH_DPLL_MASK,
-       .modes          = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
-       .auto_recal_bit = OMAP3430_EN_PERIPH_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430_PERIPH_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430_PERIPH_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(PLL_MOD, CM_AUTOIDLE),
-       .autoidle_mask  = OMAP3430_AUTO_PERIPH_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST),
-       .idlest_mask    = OMAP3430_ST_PERIPH_CLK_MASK,
-       .dco_mask       = OMAP3630_PERIPH_DPLL_DCO_SEL_MASK,
-       .sddiv_mask     = OMAP3630_PERIPH_DPLL_SD_DIV_MASK,
-       .max_multiplier = OMAP3630_MAX_JTYPE_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-       .flags          = DPLL_J_TYPE
-};
-
-static struct clk dpll4_ck;
-
-static const struct clk_ops dpll4_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap3_noncore_dpll_enable,
-       .disable        = &omap3_noncore_dpll_disable,
-       .get_parent     = &omap2_init_dpll_parent,
-       .recalc_rate    = &omap3_dpll_recalc,
-       .set_rate       = &omap3_dpll4_set_rate,
-       .set_parent     = &omap3_noncore_dpll_set_parent,
-       .set_rate_and_parent    = &omap3_dpll4_set_rate_and_parent,
-       .determine_rate = &omap3_noncore_dpll_determine_rate,
-       .round_rate     = &omap2_dpll_round_rate,
-};
-
-static struct clk_hw_omap dpll4_ck_hw = {
-       .hw = {
-               .clk = &dpll4_ck,
-       },
-       .dpll_data      = &dpll4_dd,
-       .ops            = &clkhwops_omap3_dpll,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_ck, dpll3_ck_parent_names, dpll4_ck_ops);
-
-static const struct clk_div_table dpll4_mx_ck_div_table[] = {
-       { .div = 1, .val = 1 },
-       { .div = 2, .val = 2 },
-       { .div = 3, .val = 3 },
-       { .div = 4, .val = 4 },
-       { .div = 5, .val = 5 },
-       { .div = 6, .val = 6 },
-       { .div = 7, .val = 7 },
-       { .div = 8, .val = 8 },
-       { .div = 9, .val = 9 },
-       { .div = 10, .val = 10 },
-       { .div = 11, .val = 11 },
-       { .div = 12, .val = 12 },
-       { .div = 13, .val = 13 },
-       { .div = 14, .val = 14 },
-       { .div = 15, .val = 15 },
-       { .div = 16, .val = 16 },
-       { .div = 17, .val = 17 },
-       { .div = 18, .val = 18 },
-       { .div = 19, .val = 19 },
-       { .div = 20, .val = 20 },
-       { .div = 21, .val = 21 },
-       { .div = 22, .val = 22 },
-       { .div = 23, .val = 23 },
-       { .div = 24, .val = 24 },
-       { .div = 25, .val = 25 },
-       { .div = 26, .val = 26 },
-       { .div = 27, .val = 27 },
-       { .div = 28, .val = 28 },
-       { .div = 29, .val = 29 },
-       { .div = 30, .val = 30 },
-       { .div = 31, .val = 31 },
-       { .div = 32, .val = 32 },
-       { .div = 0 },
-};
-
-DEFINE_CLK_DIVIDER(dpll4_m5_ck, "dpll4_ck", &dpll4_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_CAM_SHIFT, OMAP3630_CLKSEL_CAM_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll4_m5x2_ck;
-
-static const char *dpll4_m5x2_ck_parent_names[] = {
-       "dpll4_m5_ck",
-};
-
-static const struct clk_ops dpll4_m5x2_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .set_rate       = &omap3_clkoutx2_set_rate,
-       .recalc_rate    = &omap3_clkoutx2_recalc,
-       .round_rate     = &omap3_clkoutx2_round_rate,
-};
-
-static const struct clk_ops dpll4_m5x2_ck_3630_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap36xx_pwrdn_clk_enable_with_hsdiv_restore,
-       .disable        = &omap2_dflt_clk_disable,
-       .recalc_rate    = &omap3_clkoutx2_recalc,
-};
-
-static struct clk_hw_omap dpll4_m5x2_ck_hw = {
-       .hw = {
-               .clk = &dpll4_m5x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_CAM_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dpll4_m5x2_ck, dpll4_m5x2_ck_parent_names,
-                       dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
-
-static struct clk dpll4_m5x2_ck_3630 = {
-       .name           = "dpll4_m5x2_ck",
-       .hw             = &dpll4_m5x2_ck_hw.hw,
-       .parent_names   = dpll4_m5x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll4_m5x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-       .flags          = CLK_SET_RATE_PARENT,
-};
-
-static struct clk cam_mclk;
-
-static const char *cam_mclk_parent_names[] = {
-       "dpll4_m5x2_ck",
-};
-
-static struct clk_hw_omap cam_mclk_hw = {
-       .hw = {
-               .clk = &cam_mclk,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_CAM_SHIFT,
-       .clkdm_name     = "cam_clkdm",
-};
-
-static struct clk cam_mclk = {
-       .name           = "cam_mclk",
-       .hw             = &cam_mclk_hw.hw,
-       .parent_names   = cam_mclk_parent_names,
-       .num_parents    = ARRAY_SIZE(cam_mclk_parent_names),
-       .ops            = &aes2_ick_ops,
-       .flags          = CLK_SET_RATE_PARENT,
-};
-
-static const struct clksel_rate clkout2_src_core_rates[] = {
-       { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate clkout2_src_sys_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate clkout2_src_96m_rates[] = {
-       { .div = 1, .val = 2, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-DEFINE_CLK_DIVIDER(dpll4_m2_ck, "dpll4_ck", &dpll4_ck, 0x0,
-                  OMAP_CM_REGADDR(PLL_MOD, OMAP3430_CM_CLKSEL3),
-                  OMAP3430_DIV_96M_SHIFT, OMAP3630_DIV_96M_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll4_m2x2_ck;
-
-static const char *dpll4_m2x2_ck_parent_names[] = {
-       "dpll4_m2_ck",
-};
-
-static struct clk_hw_omap dpll4_m2x2_ck_hw = {
-       .hw = {
-               .clk = &dpll4_m2x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_96M_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_m2x2_ck, dpll4_m2x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll4_m2x2_ck_3630 = {
-       .name           = "dpll4_m2x2_ck",
-       .hw             = &dpll4_m2x2_ck_hw.hw,
-       .parent_names   = dpll4_m2x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll4_m2x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-};
-
-static struct clk omap_96m_alwon_fck;
-
-static const char *omap_96m_alwon_fck_parent_names[] = {
-       "dpll4_m2x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(omap_96m_alwon_fck, NULL);
-DEFINE_STRUCT_CLK(omap_96m_alwon_fck, omap_96m_alwon_fck_parent_names,
-                 core_ck_ops);
-
-static struct clk cm_96m_fck;
-
-static const char *cm_96m_fck_parent_names[] = {
-       "omap_96m_alwon_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(cm_96m_fck, NULL);
-DEFINE_STRUCT_CLK(cm_96m_fck, cm_96m_fck_parent_names, core_ck_ops);
-
-static const struct clksel_rate clkout2_src_54m_rates[] = {
-       { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-DEFINE_CLK_DIVIDER_TABLE(dpll4_m3_ck, "dpll4_ck", &dpll4_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_TV_SHIFT, OMAP3630_CLKSEL_TV_WIDTH,
-                  0, dpll4_mx_ck_div_table, NULL);
-
-static struct clk dpll4_m3x2_ck;
-
-static const char *dpll4_m3x2_ck_parent_names[] = {
-       "dpll4_m3_ck",
-};
-
-static struct clk_hw_omap dpll4_m3x2_ck_hw = {
-       .hw = {
-               .clk = &dpll4_m3x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_TV_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_m3x2_ck, dpll4_m3x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll4_m3x2_ck_3630 = {
-       .name           = "dpll4_m3x2_ck",
-       .hw             = &dpll4_m3x2_ck_hw.hw,
-       .parent_names   = dpll4_m3x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll4_m3x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-};
-
-static const char *omap_54m_fck_parent_names[] = {
-       "dpll4_m3x2_ck", "sys_altclk",
-};
-
-DEFINE_CLK_MUX(omap_54m_fck, omap_54m_fck_parent_names, NULL, 0x0,
-              OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), OMAP3430_SOURCE_54M_SHIFT,
-              OMAP3430_SOURCE_54M_WIDTH, 0x0, NULL);
-
-static const struct clksel clkout2_src_clksel[] = {
-       { .parent = &core_ck, .rates = clkout2_src_core_rates },
-       { .parent = &sys_ck, .rates = clkout2_src_sys_rates },
-       { .parent = &cm_96m_fck, .rates = clkout2_src_96m_rates },
-       { .parent = &omap_54m_fck, .rates = clkout2_src_54m_rates },
-       { .parent = NULL },
-};
-
-static const char *clkout2_src_ck_parent_names[] = {
-       "core_ck", "sys_ck", "cm_96m_fck", "omap_54m_fck",
-};
-
-static const struct clk_ops clkout2_src_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(clkout2_src_ck, "core_clkdm",
-                        clkout2_src_clksel, OMAP3430_CM_CLKOUT_CTRL,
-                        OMAP3430_CLKOUT2SOURCE_MASK,
-                        OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_EN_SHIFT,
-                        NULL, clkout2_src_ck_parent_names, clkout2_src_ck_ops);
-
-static const struct clksel_rate omap_48m_cm96m_rates[] = {
-       { .div = 2, .val = 0, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate omap_48m_alt_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel omap_48m_clksel[] = {
-       { .parent = &cm_96m_fck, .rates = omap_48m_cm96m_rates },
-       { .parent = &sys_altclk, .rates = omap_48m_alt_rates },
-       { .parent = NULL },
-};
-
-static const char *omap_48m_fck_parent_names[] = {
-       "cm_96m_fck", "sys_altclk",
-};
-
-static struct clk omap_48m_fck;
-
-static const struct clk_ops omap_48m_fck_ops = {
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-};
-
-static struct clk_hw_omap omap_48m_fck_hw = {
-       .hw = {
-               .clk = &omap_48m_fck,
-       },
-       .clksel         = omap_48m_clksel,
-       .clksel_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-       .clksel_mask    = OMAP3430_SOURCE_48M_MASK,
-};
-
-DEFINE_STRUCT_CLK(omap_48m_fck, omap_48m_fck_parent_names, omap_48m_fck_ops);
-
-DEFINE_CLK_FIXED_FACTOR(omap_12m_fck, "omap_48m_fck", &omap_48m_fck, 0x0, 1, 4);
-
-static struct clk core_12m_fck;
-
-static const char *core_12m_fck_parent_names[] = {
-       "omap_12m_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_12m_fck, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_12m_fck, core_12m_fck_parent_names, core_l4_ick_ops);
-
-static struct clk core_48m_fck;
-
-static const char *core_48m_fck_parent_names[] = {
-       "omap_48m_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_48m_fck, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops);
-
-static const char *omap_96m_fck_parent_names[] = {
-       "cm_96m_fck", "sys_ck",
-};
-
-DEFINE_CLK_MUX(omap_96m_fck, omap_96m_fck_parent_names, NULL, 0x0,
-              OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
-              OMAP3430_SOURCE_96M_SHIFT, OMAP3430_SOURCE_96M_WIDTH, 0x0, NULL);
-
-static struct clk core_96m_fck;
-
-static const char *core_96m_fck_parent_names[] = {
-       "omap_96m_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_96m_fck, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(core_96m_fck, core_96m_fck_parent_names, core_l4_ick_ops);
-
-static struct clk core_l3_ick;
-
-static const char *core_l3_ick_parent_names[] = {
-       "l3_ick",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(core_l3_ick, "core_l3_clkdm");
-DEFINE_STRUCT_CLK(core_l3_ick, core_l3_ick_parent_names, core_l4_ick_ops);
-
-DEFINE_CLK_FIXED_FACTOR(dpll3_m2x2_ck, "dpll3_m2_ck", &dpll3_m2_ck, 0x0, 2, 1);
-
-static struct clk corex2_fck;
-
-static const char *corex2_fck_parent_names[] = {
-       "dpll3_m2x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(corex2_fck, NULL);
-DEFINE_STRUCT_CLK(corex2_fck, corex2_fck_parent_names, core_ck_ops);
-
-static const char *cpefuse_fck_parent_names[] = {
-       "sys_ck",
-};
-
-static struct clk cpefuse_fck;
-
-static struct clk_hw_omap cpefuse_fck_hw = {
-       .hw = {
-               .clk = &cpefuse_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
-       .enable_bit     = OMAP3430ES2_EN_CPEFUSE_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(cpefuse_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk csi2_96m_fck;
-
-static const char *csi2_96m_fck_parent_names[] = {
-       "core_96m_fck",
-};
-
-static struct clk_hw_omap csi2_96m_fck_hw = {
-       .hw = {
-               .clk = &csi2_96m_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_CAM_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_CSI2_SHIFT,
-       .clkdm_name     = "cam_clkdm",
-};
-
-DEFINE_STRUCT_CLK(csi2_96m_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk d2d_26m_fck;
-
-static struct clk_hw_omap d2d_26m_fck_hw = {
-       .hw = {
-               .clk = &d2d_26m_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430ES1_EN_D2D_SHIFT,
-       .clkdm_name     = "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(d2d_26m_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk des1_ick;
-
-static struct clk_hw_omap des1_ick_hw = {
-       .hw = {
-               .clk = &des1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-       .enable_bit     = OMAP3430_EN_DES1_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(des1_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk des2_ick;
-
-static struct clk_hw_omap des2_ick_hw = {
-       .hw = {
-               .clk = &des2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_DES2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(des2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_DIVIDER(dpll1_fck, "core_ck", &core_ck, 0x0,
-                  OMAP_CM_REGADDR(MPU_MOD, OMAP3430_CM_CLKSEL1_PLL),
-                  OMAP3430_MPU_CLK_SRC_SHIFT, OMAP3430_MPU_CLK_SRC_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll2_fck;
-
-static struct dpll_data dpll2_dd = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
-       .mult_mask      = OMAP3430_IVA2_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430_IVA2_DPLL_DIV_MASK,
-       .clk_bypass     = &dpll2_fck,
-       .clk_ref        = &sys_ck,
-       .freqsel_mask   = OMAP3430_IVA2_DPLL_FREQSEL_MASK,
-       .control_reg    = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKEN_PLL),
-       .enable_mask    = OMAP3430_EN_IVA2_DPLL_MASK,
-       .modes          = ((1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED) |
-                          (1 << DPLL_LOW_POWER_BYPASS)),
-       .auto_recal_bit = OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430_PRM_IRQENABLE_MPU_IVA2_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430_PRM_IRQSTATUS_MPU_IVA2_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_AUTOIDLE_PLL),
-       .autoidle_mask  = OMAP3430_AUTO_IVA2_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_IDLEST_PLL),
-       .idlest_mask    = OMAP3430_ST_IVA2_CLK_MASK,
-       .max_multiplier = OMAP3_MAX_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll2_ck;
-
-static struct clk_hw_omap dpll2_ck_hw = {
-       .hw = {
-               .clk = &dpll2_ck,
-       },
-       .ops            = &clkhwops_omap3_dpll,
-       .dpll_data      = &dpll2_dd,
-       .clkdm_name     = "dpll2_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll2_ck, dpll3_ck_parent_names, dpll1_ck_ops);
-
-DEFINE_CLK_DIVIDER(dpll2_fck, "core_ck", &core_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL1_PLL),
-                  OMAP3430_IVA2_CLK_SRC_SHIFT, OMAP3430_IVA2_CLK_SRC_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(dpll2_m2_ck, "dpll2_ck", &dpll2_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, OMAP3430_CM_CLKSEL2_PLL),
-                  OMAP3430_IVA2_DPLL_CLKOUT_DIV_SHIFT,
-                  OMAP3430_IVA2_DPLL_CLKOUT_DIV_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(dpll3_m3_ck, "dpll3_ck", &dpll3_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_DIV_DPLL3_SHIFT, OMAP3430_DIV_DPLL3_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll3_m3x2_ck;
-
-static const char *dpll3_m3x2_ck_parent_names[] = {
-       "dpll3_m3_ck",
-};
-
-static struct clk_hw_omap dpll3_m3x2_ck_hw = {
-       .hw = {
-               .clk = &dpll3_m3x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_EMU_CORE_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll3_m3x2_ck, dpll3_m3x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll3_m3x2_ck_3630 = {
-       .name           = "dpll3_m3x2_ck",
-       .hw             = &dpll3_m3x2_ck_hw.hw,
-       .parent_names   = dpll3_m3x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll3_m3x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-};
-
-DEFINE_CLK_FIXED_FACTOR(dpll3_x2_ck, "dpll3_ck", &dpll3_ck, 0x0, 2, 1);
-
-DEFINE_CLK_DIVIDER_TABLE(dpll4_m4_ck, "dpll4_ck", &dpll4_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_DSS1_SHIFT, OMAP3630_CLKSEL_DSS1_WIDTH,
-                  0, dpll4_mx_ck_div_table, NULL);
-
-static struct clk dpll4_m4x2_ck;
-
-static const char *dpll4_m4x2_ck_parent_names[] = {
-       "dpll4_m4_ck",
-};
-
-static struct clk_hw_omap dpll4_m4x2_ck_hw = {
-       .hw = {
-               .clk = &dpll4_m4x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_DSS1_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dpll4_m4x2_ck, dpll4_m4x2_ck_parent_names,
-               dpll4_m5x2_ck_ops, CLK_SET_RATE_PARENT);
-
-static struct clk dpll4_m4x2_ck_3630 = {
-       .name           = "dpll4_m4x2_ck",
-       .hw             = &dpll4_m4x2_ck_hw.hw,
-       .parent_names   = dpll4_m4x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll4_m4x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-       .flags          = CLK_SET_RATE_PARENT,
-};
-
-DEFINE_CLK_DIVIDER(dpll4_m6_ck, "dpll4_ck", &dpll4_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_DIV_DPLL4_SHIFT, OMAP3630_DIV_DPLL4_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dpll4_m6x2_ck;
-
-static const char *dpll4_m6x2_ck_parent_names[] = {
-       "dpll4_m6_ck",
-};
-
-static struct clk_hw_omap dpll4_m6x2_ck_hw = {
-       .hw = {
-               .clk = &dpll4_m6x2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
-       .enable_bit     = OMAP3430_PWRDN_EMU_PERIPH_SHIFT,
-       .flags          = INVERT_ENABLE,
-       .clkdm_name     = "dpll4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll4_m6x2_ck, dpll4_m6x2_ck_parent_names, dpll4_m5x2_ck_ops);
-
-static struct clk dpll4_m6x2_ck_3630 = {
-       .name           = "dpll4_m6x2_ck",
-       .hw             = &dpll4_m6x2_ck_hw.hw,
-       .parent_names   = dpll4_m6x2_ck_parent_names,
-       .num_parents    = ARRAY_SIZE(dpll4_m6x2_ck_parent_names),
-       .ops            = &dpll4_m5x2_ck_3630_ops,
-};
-
-DEFINE_CLK_FIXED_FACTOR(dpll4_x2_ck, "dpll4_ck", &dpll4_ck, 0x0, 2, 1);
-
-static struct dpll_data dpll5_dd = {
-       .mult_div1_reg  = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL4),
-       .mult_mask      = OMAP3430ES2_PERIPH2_DPLL_MULT_MASK,
-       .div1_mask      = OMAP3430ES2_PERIPH2_DPLL_DIV_MASK,
-       .clk_bypass     = &sys_ck,
-       .clk_ref        = &sys_ck,
-       .freqsel_mask   = OMAP3430ES2_PERIPH2_DPLL_FREQSEL_MASK,
-       .control_reg    = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKEN2),
-       .enable_mask    = OMAP3430ES2_EN_PERIPH2_DPLL_MASK,
-       .modes          = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
-       .auto_recal_bit = OMAP3430ES2_EN_PERIPH2_DPLL_DRIFTGUARD_SHIFT,
-       .recal_en_bit   = OMAP3430ES2_SND_PERIPH_DPLL_RECAL_EN_SHIFT,
-       .recal_st_bit   = OMAP3430ES2_SND_PERIPH_DPLL_ST_SHIFT,
-       .autoidle_reg   = OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_AUTOIDLE2_PLL),
-       .autoidle_mask  = OMAP3430ES2_AUTO_PERIPH2_DPLL_MASK,
-       .idlest_reg     = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST2),
-       .idlest_mask    = OMAP3430ES2_ST_PERIPH2_CLK_MASK,
-       .max_multiplier = OMAP3_MAX_DPLL_MULT,
-       .min_divider    = 1,
-       .max_divider    = OMAP3_MAX_DPLL_DIV,
-};
-
-static struct clk dpll5_ck;
-
-static struct clk_hw_omap dpll5_ck_hw = {
-       .hw = {
-               .clk = &dpll5_ck,
-       },
-       .ops            = &clkhwops_omap3_dpll,
-       .dpll_data      = &dpll5_dd,
-       .clkdm_name     = "dpll5_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dpll5_ck, dpll3_ck_parent_names, dpll1_ck_ops);
-
-DEFINE_CLK_DIVIDER(dpll5_m2_ck, "dpll5_ck", &dpll5_ck, 0x0,
-                  OMAP_CM_REGADDR(PLL_MOD, OMAP3430ES2_CM_CLKSEL5),
-                  OMAP3430ES2_DIV_120M_SHIFT, OMAP3430ES2_DIV_120M_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk dss1_alwon_fck_3430es1;
-
-static const char *dss1_alwon_fck_3430es1_parent_names[] = {
-       "dpll4_m4x2_ck",
-};
-
-static struct clk_hw_omap dss1_alwon_fck_3430es1_hw = {
-       .hw = {
-               .clk = &dss1_alwon_fck_3430es1,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_DSS1_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es1,
-               dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops,
-               CLK_SET_RATE_PARENT);
-
-static struct clk dss1_alwon_fck_3430es2;
-
-static struct clk_hw_omap dss1_alwon_fck_3430es2_hw = {
-       .hw = {
-               .clk = &dss1_alwon_fck_3430es2,
-       },
-       .ops            = &clkhwops_omap3430es2_dss_usbhost_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_DSS1_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK_FLAGS(dss1_alwon_fck_3430es2,
-               dss1_alwon_fck_3430es1_parent_names, aes2_ick_ops,
-               CLK_SET_RATE_PARENT);
-
-static struct clk dss2_alwon_fck;
-
-static struct clk_hw_omap dss2_alwon_fck_hw = {
-       .hw = {
-               .clk = &dss2_alwon_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_DSS2_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss2_alwon_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk dss_96m_fck;
-
-static struct clk_hw_omap dss_96m_fck_hw = {
-       .hw = {
-               .clk = &dss_96m_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_TV_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_96m_fck, core_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk dss_ick_3430es1;
-
-static struct clk_hw_omap dss_ick_3430es1_hw = {
-       .hw = {
-               .clk = &dss_ick_3430es1,
-       },
-       .ops            = &clkhwops_iclk,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_ick_3430es1, security_l4_ick2_parent_names, aes2_ick_ops);
-
-static struct clk dss_ick_3430es2;
-
-static struct clk_hw_omap dss_ick_3430es2_hw = {
-       .hw = {
-               .clk = &dss_ick_3430es2,
-       },
-       .ops            = &clkhwops_omap3430es2_iclk_dss_usbhost_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_CM_ICLKEN_DSS_EN_DSS_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_ick_3430es2, security_l4_ick2_parent_names, aes2_ick_ops);
-
-static struct clk dss_tv_fck;
-
-static const char *dss_tv_fck_parent_names[] = {
-       "omap_54m_fck",
-};
-
-static struct clk_hw_omap dss_tv_fck_hw = {
-       .hw = {
-               .clk = &dss_tv_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_DSS_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_TV_SHIFT,
-       .clkdm_name     = "dss_clkdm",
-};
-
-DEFINE_STRUCT_CLK(dss_tv_fck, dss_tv_fck_parent_names, aes2_ick_ops);
-
-static struct clk emac_fck;
-
-static const char *emac_fck_parent_names[] = {
-       "rmii_ck",
-};
-
-static struct clk_hw_omap emac_fck_hw = {
-       .hw = {
-               .clk = &emac_fck,
-       },
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_CPGMAC_FCLK_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(emac_fck, emac_fck_parent_names, aes1_ick_ops);
-
-static struct clk ipss_ick;
-
-static const char *ipss_ick_parent_names[] = {
-       "core_l3_ick",
-};
-
-static struct clk_hw_omap ipss_ick_hw = {
-       .hw = {
-               .clk = &ipss_ick,
-       },
-       .ops            = &clkhwops_am35xx_ipss_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = AM35XX_EN_IPSS_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ipss_ick, ipss_ick_parent_names, aes2_ick_ops);
-
-static struct clk emac_ick;
-
-static const char *emac_ick_parent_names[] = {
-       "ipss_ick",
-};
-
-static struct clk_hw_omap emac_ick_hw = {
-       .hw = {
-               .clk = &emac_ick,
-       },
-       .ops            = &clkhwops_am35xx_ipss_module_wait,
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_CPGMAC_VBUSP_CLK_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(emac_ick, emac_ick_parent_names, aes2_ick_ops);
-
-static struct clk emu_core_alwon_ck;
-
-static const char *emu_core_alwon_ck_parent_names[] = {
-       "dpll3_m3x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(emu_core_alwon_ck, "dpll3_clkdm");
-DEFINE_STRUCT_CLK(emu_core_alwon_ck, emu_core_alwon_ck_parent_names,
-                 core_l4_ick_ops);
-
-static struct clk emu_mpu_alwon_ck;
-
-static const char *emu_mpu_alwon_ck_parent_names[] = {
-       "mpu_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(emu_mpu_alwon_ck, NULL);
-DEFINE_STRUCT_CLK(emu_mpu_alwon_ck, emu_mpu_alwon_ck_parent_names, core_ck_ops);
-
-static struct clk emu_per_alwon_ck;
-
-static const char *emu_per_alwon_ck_parent_names[] = {
-       "dpll4_m6x2_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(emu_per_alwon_ck, "dpll4_clkdm");
-DEFINE_STRUCT_CLK(emu_per_alwon_ck, emu_per_alwon_ck_parent_names,
-                 core_l4_ick_ops);
-
-static const char *emu_src_ck_parent_names[] = {
-       "sys_ck", "emu_core_alwon_ck", "emu_per_alwon_ck", "emu_mpu_alwon_ck",
-};
-
-static const struct clksel_rate emu_src_sys_rates[] = {
-       { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
-       { .div = 0 },
-};
-
-static const struct clksel_rate emu_src_core_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 0 },
-};
-
-static const struct clksel_rate emu_src_per_rates[] = {
-       { .div = 1, .val = 2, .flags = RATE_IN_3XXX },
-       { .div = 0 },
-};
-
-static const struct clksel_rate emu_src_mpu_rates[] = {
-       { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
-       { .div = 0 },
-};
-
-static const struct clksel emu_src_clksel[] = {
-       { .parent = &sys_ck,            .rates = emu_src_sys_rates },
-       { .parent = &emu_core_alwon_ck, .rates = emu_src_core_rates },
-       { .parent = &emu_per_alwon_ck,  .rates = emu_src_per_rates },
-       { .parent = &emu_mpu_alwon_ck,  .rates = emu_src_mpu_rates },
-       { .parent = NULL },
-};
-
-static const struct clk_ops emu_src_ck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-       .enable         = &omap2_clkops_enable_clkdm,
-       .disable        = &omap2_clkops_disable_clkdm,
-};
-
-static struct clk emu_src_ck;
-
-static struct clk_hw_omap emu_src_ck_hw = {
-       .hw = {
-               .clk = &emu_src_ck,
-       },
-       .clksel         = emu_src_clksel,
-       .clksel_reg     = OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-       .clksel_mask    = OMAP3430_MUX_CTRL_MASK,
-       .clkdm_name     = "emu_clkdm",
-};
-
-DEFINE_STRUCT_CLK(emu_src_ck, emu_src_ck_parent_names, emu_src_ck_ops);
-
-DEFINE_CLK_DIVIDER(atclk_fck, "emu_src_ck", &emu_src_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_CLKSEL_ATCLK_SHIFT, OMAP3430_CLKSEL_ATCLK_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk fac_ick;
-
-static struct clk_hw_omap fac_ick_hw = {
-       .hw = {
-               .clk = &fac_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430ES1_EN_FAC_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(fac_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk fshostusb_fck;
-
-static const char *fshostusb_fck_parent_names[] = {
-       "core_48m_fck",
-};
-
-static struct clk_hw_omap fshostusb_fck_hw = {
-       .hw = {
-               .clk = &fshostusb_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(fshostusb_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk gfx_l3_ck;
-
-static struct clk_hw_omap gfx_l3_ck_hw = {
-       .hw = {
-               .clk = &gfx_l3_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP_EN_GFX_SHIFT,
-       .clkdm_name     = "gfx_3430es1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gfx_l3_ck, core_l3_ick_parent_names, aes1_ick_ops);
-
-DEFINE_CLK_DIVIDER(gfx_l3_fck, "l3_ick", &l3_ick, 0x0,
-                  OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
-                  OMAP_CLKSEL_GFX_SHIFT, OMAP_CLKSEL_GFX_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk gfx_cg1_ck;
-
-static const char *gfx_cg1_ck_parent_names[] = {
-       "gfx_l3_fck",
-};
-
-static struct clk_hw_omap gfx_cg1_ck_hw = {
-       .hw = {
-               .clk = &gfx_cg1_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430ES1_EN_2D_SHIFT,
-       .clkdm_name     = "gfx_3430es1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gfx_cg1_ck, gfx_cg1_ck_parent_names, aes2_ick_ops);
-
-static struct clk gfx_cg2_ck;
-
-static struct clk_hw_omap gfx_cg2_ck_hw = {
-       .hw = {
-               .clk = &gfx_cg2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430ES1_EN_3D_SHIFT,
-       .clkdm_name     = "gfx_3430es1_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gfx_cg2_ck, gfx_cg1_ck_parent_names, aes2_ick_ops);
-
-static struct clk gfx_l3_ick;
-
-static const char *gfx_l3_ick_parent_names[] = {
-       "gfx_l3_ck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(gfx_l3_ick, "gfx_3430es1_clkdm");
-DEFINE_STRUCT_CLK(gfx_l3_ick, gfx_l3_ick_parent_names, core_l4_ick_ops);
-
-static struct clk wkup_32k_fck;
-
-static const char *wkup_32k_fck_parent_names[] = {
-       "omap_32k_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(wkup_32k_fck, "wkup_clkdm");
-DEFINE_STRUCT_CLK(wkup_32k_fck, wkup_32k_fck_parent_names, core_l4_ick_ops);
-
-static struct clk gpio1_dbck;
-
-static const char *gpio1_dbck_parent_names[] = {
-       "wkup_32k_fck",
-};
-
-static struct clk_hw_omap gpio1_dbck_hw = {
-       .hw = {
-               .clk = &gpio1_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO1_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio1_dbck, gpio1_dbck_parent_names, aes2_ick_ops);
-
-static struct clk wkup_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(wkup_l4_ick, "wkup_clkdm");
-DEFINE_STRUCT_CLK(wkup_l4_ick, cpefuse_fck_parent_names, core_l4_ick_ops);
-
-static struct clk gpio1_ick;
-
-static const char *gpio1_ick_parent_names[] = {
-       "wkup_l4_ick",
-};
-
-static struct clk_hw_omap gpio1_ick_hw = {
-       .hw = {
-               .clk = &gpio1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO1_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio1_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk per_32k_alwon_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_32k_alwon_fck, "per_clkdm");
-DEFINE_STRUCT_CLK(per_32k_alwon_fck, wkup_32k_fck_parent_names,
-                 core_l4_ick_ops);
-
-static struct clk gpio2_dbck;
-
-static const char *gpio2_dbck_parent_names[] = {
-       "per_32k_alwon_fck",
-};
-
-static struct clk_hw_omap gpio2_dbck_hw = {
-       .hw = {
-               .clk = &gpio2_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO2_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio2_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk per_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_l4_ick, "per_clkdm");
-DEFINE_STRUCT_CLK(per_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk gpio2_ick;
-
-static const char *gpio2_ick_parent_names[] = {
-       "per_l4_ick",
-};
-
-static struct clk_hw_omap gpio2_ick_hw = {
-       .hw = {
-               .clk = &gpio2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO2_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio2_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio3_dbck;
-
-static struct clk_hw_omap gpio3_dbck_hw = {
-       .hw = {
-               .clk = &gpio3_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio3_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio3_ick;
-
-static struct clk_hw_omap gpio3_ick_hw = {
-       .hw = {
-               .clk = &gpio3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio4_dbck;
-
-static struct clk_hw_omap gpio4_dbck_hw = {
-       .hw = {
-               .clk = &gpio4_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio4_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio4_ick;
-
-static struct clk_hw_omap gpio4_ick_hw = {
-       .hw = {
-               .clk = &gpio4_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio5_dbck;
-
-static struct clk_hw_omap gpio5_dbck_hw = {
-       .hw = {
-               .clk = &gpio5_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO5_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio5_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio5_ick;
-
-static struct clk_hw_omap gpio5_ick_hw = {
-       .hw = {
-               .clk = &gpio5_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO5_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio5_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpio6_dbck;
-
-static struct clk_hw_omap gpio6_dbck_hw = {
-       .hw = {
-               .clk = &gpio6_dbck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO6_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio6_dbck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk gpio6_ick;
-
-static struct clk_hw_omap gpio6_ick_hw = {
-       .hw = {
-               .clk = &gpio6_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPIO6_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpio6_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpmc_fck;
-
-static struct clk_hw_omap gpmc_fck_hw = {
-       .hw = {
-               .clk = &gpmc_fck,
-       },
-       .flags          = ENABLE_ON_INIT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpmc_fck, ipss_ick_parent_names, core_l4_ick_ops);
-
-static const struct clksel omap343x_gpt_clksel[] = {
-       { .parent = &omap_32k_fck, .rates = gpt_32k_rates },
-       { .parent = &sys_ck, .rates = gpt_sys_rates },
-       { .parent = NULL },
-};
-
-static const char *gpt10_fck_parent_names[] = {
-       "omap_32k_fck", "sys_ck",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt10_fck, "core_l4_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT10_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_GPT10_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt10_ick;
-
-static struct clk_hw_omap gpt10_ick_hw = {
-       .hw = {
-               .clk = &gpt10_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_GPT10_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt10_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt11_fck, "core_l4_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT11_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_GPT11_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt11_ick;
-
-static struct clk_hw_omap gpt11_ick_hw = {
-       .hw = {
-               .clk = &gpt11_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_GPT11_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt11_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk gpt12_fck;
-
-static const char *gpt12_fck_parent_names[] = {
-       "secure_32k_fck",
-};
-
-DEFINE_STRUCT_CLK_HW_OMAP(gpt12_fck, "wkup_clkdm");
-DEFINE_STRUCT_CLK(gpt12_fck, gpt12_fck_parent_names, core_l4_ick_ops);
-
-static struct clk gpt12_ick;
-
-static struct clk_hw_omap gpt12_ick_hw = {
-       .hw = {
-               .clk = &gpt12_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT12_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt12_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt1_fck, "wkup_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT1_MASK,
-                        OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT1_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt1_ick;
-
-static struct clk_hw_omap gpt1_ick_hw = {
-       .hw = {
-               .clk = &gpt1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT1_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt1_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt2_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT2_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT2_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt2_ick;
-
-static struct clk_hw_omap gpt2_ick_hw = {
-       .hw = {
-               .clk = &gpt2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT2_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt2_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt3_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT3_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT3_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt3_ick;
-
-static struct clk_hw_omap gpt3_ick_hw = {
-       .hw = {
-               .clk = &gpt3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt4_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT4_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT4_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt4_ick;
-
-static struct clk_hw_omap gpt4_ick_hw = {
-       .hw = {
-               .clk = &gpt4_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt5_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT5_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT5_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt5_ick;
-
-static struct clk_hw_omap gpt5_ick_hw = {
-       .hw = {
-               .clk = &gpt5_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT5_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt5_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt6_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT6_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT6_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt6_ick;
-
-static struct clk_hw_omap gpt6_ick_hw = {
-       .hw = {
-               .clk = &gpt6_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT6_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt6_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt7_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT7_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT7_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt7_ick;
-
-static struct clk_hw_omap gpt7_ick_hw = {
-       .hw = {
-               .clk = &gpt7_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT7_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt7_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt8_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT8_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT8_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt8_ick;
-
-static struct clk_hw_omap gpt8_ick_hw = {
-       .hw = {
-               .clk = &gpt8_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT8_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt8_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(gpt9_fck, "per_clkdm", omap343x_gpt_clksel,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_GPT9_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_GPT9_SHIFT, &clkhwops_wait,
-                        gpt10_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk gpt9_ick;
-
-static struct clk_hw_omap gpt9_ick_hw = {
-       .hw = {
-               .clk = &gpt9_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_GPT9_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(gpt9_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk hdq_fck;
-
-static const char *hdq_fck_parent_names[] = {
-       "core_12m_fck",
-};
-
-static struct clk_hw_omap hdq_fck_hw = {
-       .hw = {
-               .clk = &hdq_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_HDQ_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hdq_fck, hdq_fck_parent_names, aes2_ick_ops);
-
-static struct clk hdq_ick;
-
-static struct clk_hw_omap hdq_ick_hw = {
-       .hw = {
-               .clk = &hdq_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_HDQ_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hdq_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk hecc_ck;
-
-static struct clk_hw_omap hecc_ck_hw = {
-       .hw = {
-               .clk = &hecc_ck,
-       },
-       .ops            = &clkhwops_am35xx_ipss_module_wait,
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_HECC_VBUSP_CLK_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hecc_ck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_fck_am35xx;
-
-static struct clk_hw_omap hsotgusb_fck_am35xx_hw = {
-       .hw = {
-               .clk = &hsotgusb_fck_am35xx,
-       },
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_USBOTG_FCLK_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_fck_am35xx, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_ick_3430es1;
-
-static struct clk_hw_omap hsotgusb_ick_3430es1_hw = {
-       .hw = {
-               .clk = &hsotgusb_ick_3430es1,
-       },
-       .ops            = &clkhwops_iclk,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_HSOTGUSB_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_ick_3430es1, ipss_ick_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_ick_3430es2;
-
-static struct clk_hw_omap hsotgusb_ick_3430es2_hw = {
-       .hw = {
-               .clk = &hsotgusb_ick_3430es2,
-       },
-       .ops            = &clkhwops_omap3430es2_iclk_hsotgusb_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_HSOTGUSB_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_ick_3430es2, ipss_ick_parent_names, aes2_ick_ops);
-
-static struct clk hsotgusb_ick_am35xx;
-
-static struct clk_hw_omap hsotgusb_ick_am35xx_hw = {
-       .hw = {
-               .clk = &hsotgusb_ick_am35xx,
-       },
-       .ops            = &clkhwops_am35xx_ipss_module_wait,
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_USBOTG_VBUSP_CLK_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(hsotgusb_ick_am35xx, emac_ick_parent_names, aes2_ick_ops);
-
-static struct clk i2c1_fck;
-
-static struct clk_hw_omap i2c1_fck_hw = {
-       .hw = {
-               .clk = &i2c1_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c1_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk i2c1_ick;
-
-static struct clk_hw_omap i2c1_ick_hw = {
-       .hw = {
-               .clk = &i2c1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk i2c2_fck;
-
-static struct clk_hw_omap i2c2_fck_hw = {
-       .hw = {
-               .clk = &i2c2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c2_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk i2c2_ick;
-
-static struct clk_hw_omap i2c2_ick_hw = {
-       .hw = {
-               .clk = &i2c2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk i2c3_fck;
-
-static struct clk_hw_omap i2c3_fck_hw = {
-       .hw = {
-               .clk = &i2c3_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c3_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk i2c3_ick;
-
-static struct clk_hw_omap i2c3_ick_hw = {
-       .hw = {
-               .clk = &i2c3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_I2C3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(i2c3_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk icr_ick;
-
-static struct clk_hw_omap icr_ick_hw = {
-       .hw = {
-               .clk = &icr_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_ICR_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(icr_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk iva2_ck;
-
-static const char *iva2_ck_parent_names[] = {
-       "dpll2_m2_ck",
-};
-
-static struct clk_hw_omap iva2_ck_hw = {
-       .hw = {
-               .clk = &iva2_ck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_IVA2_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT,
-       .clkdm_name     = "iva2_clkdm",
-};
-
-DEFINE_STRUCT_CLK(iva2_ck, iva2_ck_parent_names, aes2_ick_ops);
-
-static struct clk mad2d_ick;
-
-static struct clk_hw_omap mad2d_ick_hw = {
-       .hw = {
-               .clk = &mad2d_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
-       .enable_bit     = OMAP3430_EN_MAD2D_SHIFT,
-       .clkdm_name     = "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mad2d_ick, core_l3_ick_parent_names, aes2_ick_ops);
-
-static struct clk mailboxes_ick;
-
-static struct clk_hw_omap mailboxes_ick_hw = {
-       .hw = {
-               .clk = &mailboxes_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MAILBOXES_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mailboxes_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate common_mcbsp_96m_rates[] = {
-       { .div = 1, .val = 0, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel mcbsp_15_clksel[] = {
-       { .parent = &core_96m_fck, .rates = common_mcbsp_96m_rates },
-       { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
-       { .parent = NULL },
-};
-
-static const char *mcbsp1_fck_parent_names[] = {
-       "core_96m_fck", "mcbsp_clks",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp1_fck, "core_l4_clkdm", mcbsp_15_clksel,
-                        OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
-                        OMAP2_MCBSP1_CLKS_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_MCBSP1_SHIFT, &clkhwops_wait,
-                        mcbsp1_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp1_ick;
-
-static struct clk_hw_omap mcbsp1_ick_hw = {
-       .hw = {
-               .clk = &mcbsp1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCBSP1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk per_96m_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_96m_fck, "per_clkdm");
-DEFINE_STRUCT_CLK(per_96m_fck, cm_96m_fck_parent_names, core_l4_ick_ops);
-
-static const struct clksel mcbsp_234_clksel[] = {
-       { .parent = &per_96m_fck, .rates = common_mcbsp_96m_rates },
-       { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
-       { .parent = NULL },
-};
-
-static const char *mcbsp2_fck_parent_names[] = {
-       "per_96m_fck", "mcbsp_clks",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp2_fck, "per_clkdm", mcbsp_234_clksel,
-                        OMAP343X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
-                        OMAP2_MCBSP2_CLKS_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_MCBSP2_SHIFT, &clkhwops_wait,
-                        mcbsp2_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp2_ick;
-
-static struct clk_hw_omap mcbsp2_ick_hw = {
-       .hw = {
-               .clk = &mcbsp2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_MCBSP2_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp2_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp3_fck, "per_clkdm", mcbsp_234_clksel,
-                        OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
-                        OMAP2_MCBSP3_CLKS_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_MCBSP3_SHIFT, &clkhwops_wait,
-                        mcbsp2_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp3_ick;
-
-static struct clk_hw_omap mcbsp3_ick_hw = {
-       .hw = {
-               .clk = &mcbsp3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_MCBSP3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp4_fck, "per_clkdm", mcbsp_234_clksel,
-                        OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
-                        OMAP2_MCBSP4_CLKS_MASK,
-                        OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-                        OMAP3430_EN_MCBSP4_SHIFT, &clkhwops_wait,
-                        mcbsp2_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp4_ick;
-
-static struct clk_hw_omap mcbsp4_ick_hw = {
-       .hw = {
-               .clk = &mcbsp4_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_MCBSP4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(mcbsp5_fck, "core_l4_clkdm", mcbsp_15_clksel,
-                        OMAP343X_CTRL_REGADDR(OMAP343X_CONTROL_DEVCONF1),
-                        OMAP2_MCBSP5_CLKS_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_MCBSP5_SHIFT, &clkhwops_wait,
-                        mcbsp1_fck_parent_names, clkout2_src_ck_ops);
-
-static struct clk mcbsp5_ick;
-
-static struct clk_hw_omap mcbsp5_ick_hw = {
-       .hw = {
-               .clk = &mcbsp5_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCBSP5_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcbsp5_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi1_fck;
-
-static struct clk_hw_omap mcspi1_fck_hw = {
-       .hw = {
-               .clk = &mcspi1_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi1_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi1_ick;
-
-static struct clk_hw_omap mcspi1_ick_hw = {
-       .hw = {
-               .clk = &mcspi1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi2_fck;
-
-static struct clk_hw_omap mcspi2_fck_hw = {
-       .hw = {
-               .clk = &mcspi2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi2_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi2_ick;
-
-static struct clk_hw_omap mcspi2_ick_hw = {
-       .hw = {
-               .clk = &mcspi2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi3_fck;
-
-static struct clk_hw_omap mcspi3_fck_hw = {
-       .hw = {
-               .clk = &mcspi3_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi3_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi3_ick;
-
-static struct clk_hw_omap mcspi3_ick_hw = {
-       .hw = {
-               .clk = &mcspi3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi3_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mcspi4_fck;
-
-static struct clk_hw_omap mcspi4_fck_hw = {
-       .hw = {
-               .clk = &mcspi4_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI4_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi4_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk mcspi4_ick;
-
-static struct clk_hw_omap mcspi4_ick_hw = {
-       .hw = {
-               .clk = &mcspi4_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MCSPI4_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mcspi4_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mmchs1_fck;
-
-static struct clk_hw_omap mmchs1_fck_hw = {
-       .hw = {
-               .clk = &mmchs1_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MMC1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs1_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mmchs1_ick;
-
-static struct clk_hw_omap mmchs1_ick_hw = {
-       .hw = {
-               .clk = &mmchs1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MMC1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mmchs2_fck;
-
-static struct clk_hw_omap mmchs2_fck_hw = {
-       .hw = {
-               .clk = &mmchs2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MMC2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs2_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mmchs2_ick;
-
-static struct clk_hw_omap mmchs2_ick_hw = {
-       .hw = {
-               .clk = &mmchs2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MMC2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk mmchs3_fck;
-
-static struct clk_hw_omap mmchs3_fck_hw = {
-       .hw = {
-               .clk = &mmchs3_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430ES2_EN_MMC3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs3_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mmchs3_ick;
-
-static struct clk_hw_omap mmchs3_ick_hw = {
-       .hw = {
-               .clk = &mmchs3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430ES2_EN_MMC3_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mmchs3_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk modem_fck;
-
-static struct clk_hw_omap modem_fck_hw = {
-       .hw = {
-               .clk = &modem_fck,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MODEM_SHIFT,
-       .clkdm_name     = "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(modem_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk mspro_fck;
-
-static struct clk_hw_omap mspro_fck_hw = {
-       .hw = {
-               .clk = &mspro_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_MSPRO_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mspro_fck, csi2_96m_fck_parent_names, aes2_ick_ops);
-
-static struct clk mspro_ick;
-
-static struct clk_hw_omap mspro_ick_hw = {
-       .hw = {
-               .clk = &mspro_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_MSPRO_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(mspro_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk omap_192m_alwon_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(omap_192m_alwon_fck, NULL);
-DEFINE_STRUCT_CLK(omap_192m_alwon_fck, omap_96m_alwon_fck_parent_names,
-                 core_ck_ops);
-
-static struct clk omap_32ksync_ick;
-
-static struct clk_hw_omap omap_32ksync_ick_hw = {
-       .hw = {
-               .clk = &omap_32ksync_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_32KSYNC_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(omap_32ksync_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate omap_96m_alwon_fck_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_36XX },
-       { .div = 2, .val = 2, .flags = RATE_IN_36XX },
-       { .div = 0 }
-};
-
-static const struct clksel omap_96m_alwon_fck_clksel[] = {
-       { .parent = &omap_192m_alwon_fck, .rates = omap_96m_alwon_fck_rates },
-       { .parent = NULL }
-};
-
-static struct clk omap_96m_alwon_fck_3630;
-
-static const char *omap_96m_alwon_fck_3630_parent_names[] = {
-       "omap_192m_alwon_fck",
-};
-
-static const struct clk_ops omap_96m_alwon_fck_3630_ops = {
-       .set_rate       = &omap2_clksel_set_rate,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .round_rate     = &omap2_clksel_round_rate,
-};
-
-static struct clk_hw_omap omap_96m_alwon_fck_3630_hw = {
-       .hw = {
-               .clk = &omap_96m_alwon_fck_3630,
-       },
-       .clksel         = omap_96m_alwon_fck_clksel,
-       .clksel_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-       .clksel_mask    = OMAP3630_CLKSEL_96M_MASK,
-};
-
-static struct clk omap_96m_alwon_fck_3630 = {
-       .name   = "omap_96m_alwon_fck",
-       .hw     = &omap_96m_alwon_fck_3630_hw.hw,
-       .parent_names   = omap_96m_alwon_fck_3630_parent_names,
-       .num_parents    = ARRAY_SIZE(omap_96m_alwon_fck_3630_parent_names),
-       .ops    = &omap_96m_alwon_fck_3630_ops,
-};
-
-static struct clk omapctrl_ick;
-
-static struct clk_hw_omap omapctrl_ick_hw = {
-       .hw = {
-               .clk = &omapctrl_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_OMAPCTRL_SHIFT,
-       .flags          = ENABLE_ON_INIT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(omapctrl_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-DEFINE_CLK_DIVIDER(pclk_fck, "emu_src_ck", &emu_src_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_CLKSEL_PCLK_SHIFT, OMAP3430_CLKSEL_PCLK_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-DEFINE_CLK_DIVIDER(pclkx2_fck, "emu_src_ck", &emu_src_ck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_CLKSEL_PCLKX2_SHIFT, OMAP3430_CLKSEL_PCLKX2_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk per_48m_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(per_48m_fck, "per_clkdm");
-DEFINE_STRUCT_CLK(per_48m_fck, core_48m_fck_parent_names, core_l4_ick_ops);
-
-static struct clk security_l3_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(security_l3_ick, NULL);
-DEFINE_STRUCT_CLK(security_l3_ick, core_l3_ick_parent_names, core_ck_ops);
-
-static struct clk pka_ick;
-
-static const char *pka_ick_parent_names[] = {
-       "security_l3_ick",
-};
-
-static struct clk_hw_omap pka_ick_hw = {
-       .hw = {
-               .clk = &pka_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-       .enable_bit     = OMAP3430_EN_PKA_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(pka_ick, pka_ick_parent_names, aes1_ick_ops);
-
-DEFINE_CLK_DIVIDER(rm_ick, "l4_ick", &l4_ick, 0x0,
-                  OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
-                  OMAP3430_CLKSEL_RM_SHIFT, OMAP3430_CLKSEL_RM_WIDTH,
-                  CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk rng_ick;
-
-static struct clk_hw_omap rng_ick_hw = {
-       .hw = {
-               .clk = &rng_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-       .enable_bit     = OMAP3430_EN_RNG_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(rng_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk sad2d_ick;
-
-static struct clk_hw_omap sad2d_ick_hw = {
-       .hw = {
-               .clk = &sad2d_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_SAD2D_SHIFT,
-       .clkdm_name     = "d2d_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sad2d_ick, core_l3_ick_parent_names, aes2_ick_ops);
-
-static struct clk sdrc_ick;
-
-static struct clk_hw_omap sdrc_ick_hw = {
-       .hw = {
-               .clk = &sdrc_ick,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_SDRC_SHIFT,
-       .flags          = ENABLE_ON_INIT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sdrc_ick, ipss_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate sgx_core_rates[] = {
-       { .div = 2, .val = 5, .flags = RATE_IN_36XX },
-       { .div = 3, .val = 0, .flags = RATE_IN_3XXX },
-       { .div = 4, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 6, .val = 2, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate sgx_96m_rates[] = {
-       { .div = 1, .val = 3, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate sgx_192m_rates[] = {
-       { .div = 1, .val = 4, .flags = RATE_IN_36XX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate sgx_corex2_rates[] = {
-       { .div = 3, .val = 6, .flags = RATE_IN_36XX },
-       { .div = 5, .val = 7, .flags = RATE_IN_36XX },
-       { .div = 0 }
-};
-
-static const struct clksel sgx_clksel[] = {
-       { .parent = &core_ck, .rates = sgx_core_rates },
-       { .parent = &cm_96m_fck, .rates = sgx_96m_rates },
-       { .parent = &omap_192m_alwon_fck, .rates = sgx_192m_rates },
-       { .parent = &corex2_fck, .rates = sgx_corex2_rates },
-       { .parent = NULL },
-};
-
-static const char *sgx_fck_parent_names[] = {
-       "core_ck", "cm_96m_fck", "omap_192m_alwon_fck", "corex2_fck",
-};
-
-static struct clk sgx_fck;
-
-static const struct clk_ops sgx_fck_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .set_rate       = &omap2_clksel_set_rate,
-       .round_rate     = &omap2_clksel_round_rate,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(sgx_fck, "sgx_clkdm", sgx_clksel,
-                        OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_CLKSEL),
-                        OMAP3430ES2_CLKSEL_SGX_MASK,
-                        OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_FCLKEN),
-                        OMAP3430ES2_CM_FCLKEN_SGX_EN_SGX_SHIFT,
-                        &clkhwops_wait, sgx_fck_parent_names, sgx_fck_ops);
-
-static struct clk sgx_ick;
-
-static struct clk_hw_omap sgx_ick_hw = {
-       .hw = {
-               .clk = &sgx_ick,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430ES2_SGX_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430ES2_CM_ICLKEN_SGX_EN_SGX_SHIFT,
-       .clkdm_name     = "sgx_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sgx_ick, core_l3_ick_parent_names, aes2_ick_ops);
-
-static struct clk sha11_ick;
-
-static struct clk_hw_omap sha11_ick_hw = {
-       .hw = {
-               .clk = &sha11_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
-       .enable_bit     = OMAP3430_EN_SHA11_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(sha11_ick, aes1_ick_parent_names, aes1_ick_ops);
-
-static struct clk sha12_ick;
-
-static struct clk_hw_omap sha12_ick_hw = {
-       .hw = {
-               .clk = &sha12_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_SHA12_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sha12_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk sr1_fck;
-
-static struct clk_hw_omap sr1_fck_hw = {
-       .hw = {
-               .clk = &sr1_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_SR1_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sr1_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk sr2_fck;
-
-static struct clk_hw_omap sr2_fck_hw = {
-       .hw = {
-               .clk = &sr2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_SR2_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(sr2_fck, cpefuse_fck_parent_names, aes2_ick_ops);
-
-static struct clk sr_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(sr_l4_ick, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(sr_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk ssi_l4_ick;
-
-DEFINE_STRUCT_CLK_HW_OMAP(ssi_l4_ick, "core_l4_clkdm");
-DEFINE_STRUCT_CLK(ssi_l4_ick, security_l4_ick2_parent_names, core_l4_ick_ops);
-
-static struct clk ssi_ick_3430es1;
-
-static const char *ssi_ick_3430es1_parent_names[] = {
-       "ssi_l4_ick",
-};
-
-static struct clk_hw_omap ssi_ick_3430es1_hw = {
-       .hw = {
-               .clk = &ssi_ick_3430es1,
-       },
-       .ops            = &clkhwops_iclk,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_SSI_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ssi_ick_3430es1, ssi_ick_3430es1_parent_names, aes2_ick_ops);
-
-static struct clk ssi_ick_3430es2;
-
-static struct clk_hw_omap ssi_ick_3430es2_hw = {
-       .hw = {
-               .clk = &ssi_ick_3430es2,
-       },
-       .ops            = &clkhwops_omap3430es2_iclk_ssi_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_SSI_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ssi_ick_3430es2, ssi_ick_3430es1_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate ssi_ssr_corex2_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
-       { .div = 3, .val = 3, .flags = RATE_IN_3XXX },
-       { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
-       { .div = 6, .val = 6, .flags = RATE_IN_3XXX },
-       { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel ssi_ssr_clksel[] = {
-       { .parent = &corex2_fck, .rates = ssi_ssr_corex2_rates },
-       { .parent = NULL },
-};
-
-static const char *ssi_ssr_fck_3430es1_parent_names[] = {
-       "corex2_fck",
-};
-
-static const struct clk_ops ssi_ssr_fck_3430es1_ops = {
-       .init           = &omap2_init_clk_clkdm,
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .set_rate       = &omap2_clksel_set_rate,
-       .round_rate     = &omap2_clksel_round_rate,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es1, "core_l4_clkdm",
-                        ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_SSI_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_SSI_SHIFT,
-                        NULL, ssi_ssr_fck_3430es1_parent_names,
-                        ssi_ssr_fck_3430es1_ops);
-
-DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_fck_3430es2, "core_l4_clkdm",
-                        ssi_ssr_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                        OMAP3430_CLKSEL_SSI_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-                        OMAP3430_EN_SSI_SHIFT,
-                        NULL, ssi_ssr_fck_3430es1_parent_names,
-                        ssi_ssr_fck_3430es1_ops);
-
-DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es1, "ssi_ssr_fck_3430es1",
-                       &ssi_ssr_fck_3430es1, 0x0, 1, 2);
-
-DEFINE_CLK_FIXED_FACTOR(ssi_sst_fck_3430es2, "ssi_ssr_fck_3430es2",
-                       &ssi_ssr_fck_3430es2, 0x0, 1, 2);
-
-static struct clk sys_clkout1;
-
-static const char *sys_clkout1_parent_names[] = {
-       "osc_sys_ck",
-};
-
-static struct clk_hw_omap sys_clkout1_hw = {
-       .hw = {
-               .clk = &sys_clkout1,
-       },
-       .enable_reg     = OMAP3430_PRM_CLKOUT_CTRL,
-       .enable_bit     = OMAP3430_CLKOUT_EN_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(sys_clkout1, sys_clkout1_parent_names, aes1_ick_ops);
-
-DEFINE_CLK_DIVIDER(sys_clkout2, "clkout2_src_ck", &clkout2_src_ck, 0x0,
-                  OMAP3430_CM_CLKOUT_CTRL, OMAP3430_CLKOUT2_DIV_SHIFT,
-                  OMAP3430_CLKOUT2_DIV_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL);
-
-DEFINE_CLK_MUX(traceclk_src_fck, emu_src_ck_parent_names, NULL, 0x0,
-              OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-              OMAP3430_TRACE_MUX_CTRL_SHIFT, OMAP3430_TRACE_MUX_CTRL_WIDTH,
-              0x0, NULL);
-
-DEFINE_CLK_DIVIDER(traceclk_fck, "traceclk_src_fck", &traceclk_src_fck, 0x0,
-                  OMAP_CM_REGADDR(OMAP3430_EMU_MOD, CM_CLKSEL1),
-                  OMAP3430_CLKSEL_TRACECLK_SHIFT,
-                  OMAP3430_CLKSEL_TRACECLK_WIDTH, CLK_DIVIDER_ONE_BASED, NULL);
-
-static struct clk ts_fck;
-
-static struct clk_hw_omap ts_fck_hw = {
-       .hw = {
-               .clk = &ts_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
-       .enable_bit     = OMAP3430ES2_EN_TS_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(ts_fck, wkup_32k_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart1_fck;
-
-static struct clk_hw_omap uart1_fck_hw = {
-       .hw = {
-               .clk = &uart1_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_UART1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart1_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart1_ick;
-
-static struct clk_hw_omap uart1_ick_hw = {
-       .hw = {
-               .clk = &uart1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_UART1_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart1_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart2_fck;
-
-static struct clk_hw_omap uart2_fck_hw = {
-       .hw = {
-               .clk = &uart2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = OMAP3430_EN_UART2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart2_fck, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart2_ick;
-
-static struct clk_hw_omap uart2_ick_hw = {
-       .hw = {
-               .clk = &uart2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = OMAP3430_EN_UART2_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart2_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart3_fck;
-
-static const char *uart3_fck_parent_names[] = {
-       "per_48m_fck",
-};
-
-static struct clk_hw_omap uart3_fck_hw = {
-       .hw = {
-               .clk = &uart3_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_UART3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart3_fck, uart3_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart3_ick;
-
-static struct clk_hw_omap uart3_ick_hw = {
-       .hw = {
-               .clk = &uart3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_UART3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart4_fck;
-
-static struct clk_hw_omap uart4_fck_hw = {
-       .hw = {
-               .clk = &uart4_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3630_EN_UART4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_fck, uart3_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart4_fck_am35xx;
-
-static struct clk_hw_omap uart4_fck_am35xx_hw = {
-       .hw = {
-               .clk = &uart4_fck_am35xx,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
-       .enable_bit     = AM35XX_EN_UART4_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_fck_am35xx, fshostusb_fck_parent_names, aes2_ick_ops);
-
-static struct clk uart4_ick;
-
-static struct clk_hw_omap uart4_ick_hw = {
-       .hw = {
-               .clk = &uart4_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3630_EN_UART4_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-static struct clk uart4_ick_am35xx;
-
-static struct clk_hw_omap uart4_ick_am35xx_hw = {
-       .hw = {
-               .clk = &uart4_ick_am35xx,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-       .enable_bit     = AM35XX_EN_UART4_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(uart4_ick_am35xx, aes2_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate div2_rates[] = {
-       { .div = 1, .val = 1, .flags = RATE_IN_3XXX },
-       { .div = 2, .val = 2, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel usb_l4_clksel[] = {
-       { .parent = &l4_ick, .rates = div2_rates },
-       { .parent = NULL },
-};
-
-static const char *usb_l4_ick_parent_names[] = {
-       "l4_ick",
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(usb_l4_ick, "core_l4_clkdm", usb_l4_clksel,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL),
-                        OMAP3430ES1_CLKSEL_FSHOSTUSB_MASK,
-                        OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
-                        OMAP3430ES1_EN_FSHOSTUSB_SHIFT,
-                        &clkhwops_iclk_wait, usb_l4_ick_parent_names,
-                        ssi_ssr_fck_3430es1_ops);
-
-static struct clk usbhost_120m_fck;
-
-static const char *usbhost_120m_fck_parent_names[] = {
-       "dpll5_m2_ck",
-};
-
-static struct clk_hw_omap usbhost_120m_fck_hw = {
-       .hw = {
-               .clk = &usbhost_120m_fck,
-       },
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430ES2_EN_USBHOST2_SHIFT,
-       .clkdm_name     = "usbhost_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbhost_120m_fck, usbhost_120m_fck_parent_names,
-                 aes2_ick_ops);
-
-static struct clk usbhost_48m_fck;
-
-static struct clk_hw_omap usbhost_48m_fck_hw = {
-       .hw = {
-               .clk = &usbhost_48m_fck,
-       },
-       .ops            = &clkhwops_omap3430es2_dss_usbhost_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430ES2_EN_USBHOST1_SHIFT,
-       .clkdm_name     = "usbhost_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbhost_48m_fck, core_48m_fck_parent_names, aes2_ick_ops);
-
-static struct clk usbhost_ick;
-
-static struct clk_hw_omap usbhost_ick_hw = {
-       .hw = {
-               .clk = &usbhost_ick,
-       },
-       .ops            = &clkhwops_omap3430es2_iclk_dss_usbhost_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430ES2_USBHOST_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430ES2_EN_USBHOST_SHIFT,
-       .clkdm_name     = "usbhost_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbhost_ick, security_l4_ick2_parent_names, aes2_ick_ops);
-
-static struct clk usbtll_fck;
-
-static struct clk_hw_omap usbtll_fck_hw = {
-       .hw = {
-               .clk = &usbtll_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, OMAP3430ES2_CM_FCLKEN3),
-       .enable_bit     = OMAP3430ES2_EN_USBTLL_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbtll_fck, usbhost_120m_fck_parent_names, aes2_ick_ops);
-
-static struct clk usbtll_ick;
-
-static struct clk_hw_omap usbtll_ick_hw = {
-       .hw = {
-               .clk = &usbtll_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
-       .enable_bit     = OMAP3430ES2_EN_USBTLL_SHIFT,
-       .clkdm_name     = "core_l4_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usbtll_ick, aes2_ick_parent_names, aes2_ick_ops);
-
-static const struct clksel_rate usim_96m_rates[] = {
-       { .div = 2, .val = 3, .flags = RATE_IN_3XXX },
-       { .div = 4, .val = 4, .flags = RATE_IN_3XXX },
-       { .div = 8, .val = 5, .flags = RATE_IN_3XXX },
-       { .div = 10, .val = 6, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel_rate usim_120m_rates[] = {
-       { .div = 4, .val = 7, .flags = RATE_IN_3XXX },
-       { .div = 8, .val = 8, .flags = RATE_IN_3XXX },
-       { .div = 16, .val = 9, .flags = RATE_IN_3XXX },
-       { .div = 20, .val = 10, .flags = RATE_IN_3XXX },
-       { .div = 0 }
-};
-
-static const struct clksel usim_clksel[] = {
-       { .parent = &omap_96m_fck, .rates = usim_96m_rates },
-       { .parent = &dpll5_m2_ck, .rates = usim_120m_rates },
-       { .parent = &sys_ck, .rates = div2_rates },
-       { .parent = NULL },
-};
-
-static const char *usim_fck_parent_names[] = {
-       "omap_96m_fck", "dpll5_m2_ck", "sys_ck",
-};
-
-static struct clk usim_fck;
-
-static const struct clk_ops usim_fck_ops = {
-       .enable         = &omap2_dflt_clk_enable,
-       .disable        = &omap2_dflt_clk_disable,
-       .is_enabled     = &omap2_dflt_clk_is_enabled,
-       .recalc_rate    = &omap2_clksel_recalc,
-       .get_parent     = &omap2_clksel_find_parent_index,
-       .set_parent     = &omap2_clksel_set_parent,
-};
-
-DEFINE_CLK_OMAP_MUX_GATE(usim_fck, NULL, usim_clksel,
-                        OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL),
-                        OMAP3430ES2_CLKSEL_USIMOCP_MASK,
-                        OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-                        OMAP3430ES2_EN_USIMOCP_SHIFT, &clkhwops_wait,
-                        usim_fck_parent_names, usim_fck_ops);
-
-static struct clk usim_ick;
-
-static struct clk_hw_omap usim_ick_hw = {
-       .hw = {
-               .clk = &usim_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430ES2_EN_USIMOCP_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(usim_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk vpfe_fck;
-
-static const char *vpfe_fck_parent_names[] = {
-       "pclk_ck",
-};
-
-static struct clk_hw_omap vpfe_fck_hw = {
-       .hw = {
-               .clk = &vpfe_fck,
-       },
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_VPFE_FCLK_SHIFT,
-};
-
-DEFINE_STRUCT_CLK(vpfe_fck, vpfe_fck_parent_names, aes1_ick_ops);
-
-static struct clk vpfe_ick;
-
-static struct clk_hw_omap vpfe_ick_hw = {
-       .hw = {
-               .clk = &vpfe_ick,
-       },
-       .ops            = &clkhwops_am35xx_ipss_module_wait,
-       .enable_reg     = OMAP343X_CTRL_REGADDR(AM35XX_CONTROL_IPSS_CLK_CTRL),
-       .enable_bit     = AM35XX_VPFE_VBUSP_CLK_SHIFT,
-       .clkdm_name     = "core_l3_clkdm",
-};
-
-DEFINE_STRUCT_CLK(vpfe_ick, emac_ick_parent_names, aes2_ick_ops);
-
-static struct clk wdt1_fck;
-
-DEFINE_STRUCT_CLK_HW_OMAP(wdt1_fck, "wkup_clkdm");
-DEFINE_STRUCT_CLK(wdt1_fck, gpt12_fck_parent_names, core_l4_ick_ops);
-
-static struct clk wdt1_ick;
-
-static struct clk_hw_omap wdt1_ick_hw = {
-       .hw = {
-               .clk = &wdt1_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_WDT1_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt1_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk wdt2_fck;
-
-static struct clk_hw_omap wdt2_fck_hw = {
-       .hw = {
-               .clk = &wdt2_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_WDT2_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt2_fck, gpio1_dbck_parent_names, aes2_ick_ops);
-
-static struct clk wdt2_ick;
-
-static struct clk_hw_omap wdt2_ick_hw = {
-       .hw = {
-               .clk = &wdt2_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_WDT2_SHIFT,
-       .clkdm_name     = "wkup_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt2_ick, gpio1_ick_parent_names, aes2_ick_ops);
-
-static struct clk wdt3_fck;
-
-static struct clk_hw_omap wdt3_fck_hw = {
-       .hw = {
-               .clk = &wdt3_fck,
-       },
-       .ops            = &clkhwops_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_FCLKEN),
-       .enable_bit     = OMAP3430_EN_WDT3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt3_fck, gpio2_dbck_parent_names, aes2_ick_ops);
-
-static struct clk wdt3_ick;
-
-static struct clk_hw_omap wdt3_ick_hw = {
-       .hw = {
-               .clk = &wdt3_ick,
-       },
-       .ops            = &clkhwops_iclk_wait,
-       .enable_reg     = OMAP_CM_REGADDR(OMAP3430_PER_MOD, CM_ICLKEN),
-       .enable_bit     = OMAP3430_EN_WDT3_SHIFT,
-       .clkdm_name     = "per_clkdm",
-};
-
-DEFINE_STRUCT_CLK(wdt3_ick, gpio2_ick_parent_names, aes2_ick_ops);
-
-/*
- * clocks specific to omap3430es1
- */
-static struct omap_clk omap3430es1_clks[] = {
-       CLK(NULL,       "gfx_l3_ck",    &gfx_l3_ck),
-       CLK(NULL,       "gfx_l3_fck",   &gfx_l3_fck),
-       CLK(NULL,       "gfx_l3_ick",   &gfx_l3_ick),
-       CLK(NULL,       "gfx_cg1_ck",   &gfx_cg1_ck),
-       CLK(NULL,       "gfx_cg2_ck",   &gfx_cg2_ck),
-       CLK(NULL,       "d2d_26m_fck",  &d2d_26m_fck),
-       CLK(NULL,       "fshostusb_fck", &fshostusb_fck),
-       CLK(NULL,       "ssi_ssr_fck",  &ssi_ssr_fck_3430es1),
-       CLK(NULL,       "ssi_sst_fck",  &ssi_sst_fck_3430es1),
-       CLK("musb-omap2430",    "ick",  &hsotgusb_ick_3430es1),
-       CLK(NULL,       "hsotgusb_ick", &hsotgusb_ick_3430es1),
-       CLK(NULL,       "fac_ick",      &fac_ick),
-       CLK(NULL,       "ssi_ick",      &ssi_ick_3430es1),
-       CLK(NULL,       "usb_l4_ick",   &usb_l4_ick),
-       CLK(NULL,       "dss1_alwon_fck",       &dss1_alwon_fck_3430es1),
-       CLK("omapdss_dss",      "ick",          &dss_ick_3430es1),
-       CLK(NULL,       "dss_ick",              &dss_ick_3430es1),
-};
-
-/*
- * clocks specific to am35xx
- */
-static struct omap_clk am35xx_clks[] = {
-       CLK(NULL,       "ipss_ick",     &ipss_ick),
-       CLK(NULL,       "rmii_ck",      &rmii_ck),
-       CLK(NULL,       "pclk_ck",      &pclk_ck),
-       CLK(NULL,       "emac_ick",     &emac_ick),
-       CLK(NULL,       "emac_fck",     &emac_fck),
-       CLK("davinci_emac.0",   NULL,   &emac_ick),
-       CLK("davinci_mdio.0",   NULL,   &emac_fck),
-       CLK("vpfe-capture",     "master",       &vpfe_ick),
-       CLK("vpfe-capture",     "slave",        &vpfe_fck),
-       CLK(NULL,       "hsotgusb_ick",         &hsotgusb_ick_am35xx),
-       CLK(NULL,       "hsotgusb_fck",         &hsotgusb_fck_am35xx),
-       CLK(NULL,       "hecc_ck",      &hecc_ck),
-       CLK(NULL,       "uart4_ick",    &uart4_ick_am35xx),
-       CLK(NULL,       "uart4_fck",    &uart4_fck_am35xx),
-};
-
-/*
- * clocks specific to omap36xx
- */
-static struct omap_clk omap36xx_clks[] = {
-       CLK(NULL,       "omap_192m_alwon_fck", &omap_192m_alwon_fck),
-       CLK(NULL,       "uart4_fck",    &uart4_fck),
-};
-
-/*
- * clocks common to omap36xx omap34xx
- */
-static struct omap_clk omap34xx_omap36xx_clks[] = {
-       CLK(NULL,       "aes1_ick",     &aes1_ick),
-       CLK("omap_rng", "ick",          &rng_ick),
-       CLK("omap3-rom-rng",    "ick",  &rng_ick),
-       CLK(NULL,       "sha11_ick",    &sha11_ick),
-       CLK(NULL,       "des1_ick",     &des1_ick),
-       CLK(NULL,       "cam_mclk",     &cam_mclk),
-       CLK(NULL,       "cam_ick",      &cam_ick),
-       CLK(NULL,       "csi2_96m_fck", &csi2_96m_fck),
-       CLK(NULL,       "security_l3_ick", &security_l3_ick),
-       CLK(NULL,       "pka_ick",      &pka_ick),
-       CLK(NULL,       "icr_ick",      &icr_ick),
-       CLK("omap-aes", "ick",  &aes2_ick),
-       CLK("omap-sham",        "ick",  &sha12_ick),
-       CLK(NULL,       "des2_ick",     &des2_ick),
-       CLK(NULL,       "mspro_ick",    &mspro_ick),
-       CLK(NULL,       "mailboxes_ick", &mailboxes_ick),
-       CLK(NULL,       "ssi_l4_ick",   &ssi_l4_ick),
-       CLK(NULL,       "sr1_fck",      &sr1_fck),
-       CLK(NULL,       "sr2_fck",      &sr2_fck),
-       CLK(NULL,       "sr_l4_ick",    &sr_l4_ick),
-       CLK(NULL,       "security_l4_ick2", &security_l4_ick2),
-       CLK(NULL,       "wkup_l4_ick",  &wkup_l4_ick),
-       CLK(NULL,       "dpll2_fck",    &dpll2_fck),
-       CLK(NULL,       "iva2_ck",      &iva2_ck),
-       CLK(NULL,       "modem_fck",    &modem_fck),
-       CLK(NULL,       "sad2d_ick",    &sad2d_ick),
-       CLK(NULL,       "mad2d_ick",    &mad2d_ick),
-       CLK(NULL,       "mspro_fck",    &mspro_fck),
-       CLK(NULL,       "dpll2_ck",     &dpll2_ck),
-       CLK(NULL,       "dpll2_m2_ck",  &dpll2_m2_ck),
-};
-
-/*
- * clocks common to omap36xx and omap3430es2plus
- */
-static struct omap_clk omap36xx_omap3430es2plus_clks[] = {
-       CLK(NULL,       "ssi_ssr_fck",  &ssi_ssr_fck_3430es2),
-       CLK(NULL,       "ssi_sst_fck",  &ssi_sst_fck_3430es2),
-       CLK("musb-omap2430",    "ick",  &hsotgusb_ick_3430es2),
-       CLK(NULL,       "hsotgusb_ick", &hsotgusb_ick_3430es2),
-       CLK(NULL,       "ssi_ick",      &ssi_ick_3430es2),
-       CLK(NULL,       "usim_fck",     &usim_fck),
-       CLK(NULL,       "usim_ick",     &usim_ick),
-};
-
-/*
- * clocks common to am35xx omap36xx and omap3430es2plus
- */
-static struct omap_clk omap36xx_am35xx_omap3430es2plus_clks[] = {
-       CLK(NULL,       "virt_16_8m_ck", &virt_16_8m_ck),
-       CLK(NULL,       "dpll5_ck",     &dpll5_ck),
-       CLK(NULL,       "dpll5_m2_ck",  &dpll5_m2_ck),
-       CLK(NULL,       "sgx_fck",      &sgx_fck),
-       CLK(NULL,       "sgx_ick",      &sgx_ick),
-       CLK(NULL,       "cpefuse_fck",  &cpefuse_fck),
-       CLK(NULL,       "ts_fck",       &ts_fck),
-       CLK(NULL,       "usbtll_fck",   &usbtll_fck),
-       CLK(NULL,       "usbtll_ick",   &usbtll_ick),
-       CLK("omap_hsmmc.2",     "ick",  &mmchs3_ick),
-       CLK(NULL,       "mmchs3_ick",   &mmchs3_ick),
-       CLK(NULL,       "mmchs3_fck",   &mmchs3_fck),
-       CLK(NULL,       "dss1_alwon_fck",       &dss1_alwon_fck_3430es2),
-       CLK("omapdss_dss",      "ick",          &dss_ick_3430es2),
-       CLK(NULL,       "dss_ick",              &dss_ick_3430es2),
-       CLK(NULL,       "usbhost_120m_fck", &usbhost_120m_fck),
-       CLK(NULL,       "usbhost_48m_fck", &usbhost_48m_fck),
-       CLK(NULL,       "usbhost_ick",  &usbhost_ick),
-};
-
-/*
- * common clocks
- */
-static struct omap_clk omap3xxx_clks[] = {
-       CLK(NULL,       "apb_pclk",     &dummy_apb_pclk),
-       CLK(NULL,       "omap_32k_fck", &omap_32k_fck),
-       CLK(NULL,       "virt_12m_ck",  &virt_12m_ck),
-       CLK(NULL,       "virt_13m_ck",  &virt_13m_ck),
-       CLK(NULL,       "virt_19200000_ck", &virt_19200000_ck),
-       CLK(NULL,       "virt_26000000_ck", &virt_26000000_ck),
-       CLK(NULL,       "virt_38_4m_ck", &virt_38_4m_ck),
-       CLK(NULL,       "osc_sys_ck",   &osc_sys_ck),
-       CLK("twl",      "fck",          &osc_sys_ck),
-       CLK(NULL,       "sys_ck",       &sys_ck),
-       CLK(NULL,       "omap_96m_alwon_fck", &omap_96m_alwon_fck),
-       CLK("etb",      "emu_core_alwon_ck", &emu_core_alwon_ck),
-       CLK(NULL,       "sys_altclk",   &sys_altclk),
-       CLK(NULL,       "mcbsp_clks",   &mcbsp_clks),
-       CLK(NULL,       "sys_clkout1",  &sys_clkout1),
-       CLK(NULL,       "dpll1_ck",     &dpll1_ck),
-       CLK(NULL,       "dpll1_x2_ck",  &dpll1_x2_ck),
-       CLK(NULL,       "dpll1_x2m2_ck", &dpll1_x2m2_ck),
-       CLK(NULL,       "dpll3_ck",     &dpll3_ck),
-       CLK(NULL,       "core_ck",      &core_ck),
-       CLK(NULL,       "dpll3_x2_ck",  &dpll3_x2_ck),
-       CLK(NULL,       "dpll3_m2_ck",  &dpll3_m2_ck),
-       CLK(NULL,       "dpll3_m2x2_ck", &dpll3_m2x2_ck),
-       CLK(NULL,       "dpll3_m3_ck",  &dpll3_m3_ck),
-       CLK(NULL,       "dpll3_m3x2_ck", &dpll3_m3x2_ck),
-       CLK(NULL,       "dpll4_ck",     &dpll4_ck),
-       CLK(NULL,       "dpll4_x2_ck",  &dpll4_x2_ck),
-       CLK(NULL,       "omap_96m_fck", &omap_96m_fck),
-       CLK(NULL,       "cm_96m_fck",   &cm_96m_fck),
-       CLK(NULL,       "omap_54m_fck", &omap_54m_fck),
-       CLK(NULL,       "omap_48m_fck", &omap_48m_fck),
-       CLK(NULL,       "omap_12m_fck", &omap_12m_fck),
-       CLK(NULL,       "dpll4_m2_ck",  &dpll4_m2_ck),
-       CLK(NULL,       "dpll4_m2x2_ck", &dpll4_m2x2_ck),
-       CLK(NULL,       "dpll4_m3_ck",  &dpll4_m3_ck),
-       CLK(NULL,       "dpll4_m3x2_ck", &dpll4_m3x2_ck),
-       CLK(NULL,       "dpll4_m4_ck",  &dpll4_m4_ck),
-       CLK(NULL,       "dpll4_m4x2_ck", &dpll4_m4x2_ck),
-       CLK(NULL,       "dpll4_m5_ck",  &dpll4_m5_ck),
-       CLK(NULL,       "dpll4_m5x2_ck", &dpll4_m5x2_ck),
-       CLK(NULL,       "dpll4_m6_ck",  &dpll4_m6_ck),
-       CLK(NULL,       "dpll4_m6x2_ck", &dpll4_m6x2_ck),
-       CLK("etb",      "emu_per_alwon_ck", &emu_per_alwon_ck),
-       CLK(NULL,       "clkout2_src_ck", &clkout2_src_ck),
-       CLK(NULL,       "sys_clkout2",  &sys_clkout2),
-       CLK(NULL,       "corex2_fck",   &corex2_fck),
-       CLK(NULL,       "dpll1_fck",    &dpll1_fck),
-       CLK(NULL,       "mpu_ck",       &mpu_ck),
-       CLK(NULL,       "arm_fck",      &arm_fck),
-       CLK("etb",      "emu_mpu_alwon_ck", &emu_mpu_alwon_ck),
-       CLK(NULL,       "l3_ick",       &l3_ick),
-       CLK(NULL,       "l4_ick",       &l4_ick),
-       CLK(NULL,       "rm_ick",       &rm_ick),
-       CLK(NULL,       "gpt10_fck",    &gpt10_fck),
-       CLK(NULL,       "gpt11_fck",    &gpt11_fck),
-       CLK(NULL,       "core_96m_fck", &core_96m_fck),
-       CLK(NULL,       "mmchs2_fck",   &mmchs2_fck),
-       CLK(NULL,       "mmchs1_fck",   &mmchs1_fck),
-       CLK(NULL,       "i2c3_fck",     &i2c3_fck),
-       CLK(NULL,       "i2c2_fck",     &i2c2_fck),
-       CLK(NULL,       "i2c1_fck",     &i2c1_fck),
-       CLK(NULL,       "mcbsp5_fck",   &mcbsp5_fck),
-       CLK(NULL,       "mcbsp1_fck",   &mcbsp1_fck),
-       CLK(NULL,       "core_48m_fck", &core_48m_fck),
-       CLK(NULL,       "mcspi4_fck",   &mcspi4_fck),
-       CLK(NULL,       "mcspi3_fck",   &mcspi3_fck),
-       CLK(NULL,       "mcspi2_fck",   &mcspi2_fck),
-       CLK(NULL,       "mcspi1_fck",   &mcspi1_fck),
-       CLK(NULL,       "uart2_fck",    &uart2_fck),
-       CLK(NULL,       "uart1_fck",    &uart1_fck),
-       CLK(NULL,       "core_12m_fck", &core_12m_fck),
-       CLK("omap_hdq.0",       "fck",  &hdq_fck),
-       CLK(NULL,       "hdq_fck",      &hdq_fck),
-       CLK(NULL,       "core_l3_ick",  &core_l3_ick),
-       CLK(NULL,       "sdrc_ick",     &sdrc_ick),
-       CLK(NULL,       "gpmc_fck",     &gpmc_fck),
-       CLK(NULL,       "core_l4_ick",  &core_l4_ick),
-       CLK("omap_hsmmc.1",     "ick",  &mmchs2_ick),
-       CLK("omap_hsmmc.0",     "ick",  &mmchs1_ick),
-       CLK(NULL,       "mmchs2_ick",   &mmchs2_ick),
-       CLK(NULL,       "mmchs1_ick",   &mmchs1_ick),
-       CLK("omap_hdq.0", "ick",        &hdq_ick),
-       CLK(NULL,       "hdq_ick",      &hdq_ick),
-       CLK("omap2_mcspi.4", "ick",     &mcspi4_ick),
-       CLK("omap2_mcspi.3", "ick",     &mcspi3_ick),
-       CLK("omap2_mcspi.2", "ick",     &mcspi2_ick),
-       CLK("omap2_mcspi.1", "ick",     &mcspi1_ick),
-       CLK(NULL,       "mcspi4_ick",   &mcspi4_ick),
-       CLK(NULL,       "mcspi3_ick",   &mcspi3_ick),
-       CLK(NULL,       "mcspi2_ick",   &mcspi2_ick),
-       CLK(NULL,       "mcspi1_ick",   &mcspi1_ick),
-       CLK("omap_i2c.3", "ick",        &i2c3_ick),
-       CLK("omap_i2c.2", "ick",        &i2c2_ick),
-       CLK("omap_i2c.1", "ick",        &i2c1_ick),
-       CLK(NULL,       "i2c3_ick",     &i2c3_ick),
-       CLK(NULL,       "i2c2_ick",     &i2c2_ick),
-       CLK(NULL,       "i2c1_ick",     &i2c1_ick),
-       CLK(NULL,       "uart2_ick",    &uart2_ick),
-       CLK(NULL,       "uart1_ick",    &uart1_ick),
-       CLK(NULL,       "gpt11_ick",    &gpt11_ick),
-       CLK(NULL,       "gpt10_ick",    &gpt10_ick),
-       CLK("omap-mcbsp.5", "ick",      &mcbsp5_ick),
-       CLK("omap-mcbsp.1", "ick",      &mcbsp1_ick),
-       CLK(NULL,       "mcbsp5_ick",   &mcbsp5_ick),
-       CLK(NULL,       "mcbsp1_ick",   &mcbsp1_ick),
-       CLK(NULL,       "omapctrl_ick", &omapctrl_ick),
-       CLK(NULL,       "dss_tv_fck",   &dss_tv_fck),
-       CLK(NULL,       "dss_96m_fck",  &dss_96m_fck),
-       CLK(NULL,       "dss2_alwon_fck",       &dss2_alwon_fck),
-       CLK(NULL,       "init_60m_fclk",        &dummy_ck),
-       CLK(NULL,       "gpt1_fck",     &gpt1_fck),
-       CLK(NULL,       "aes2_ick",     &aes2_ick),
-       CLK(NULL,       "wkup_32k_fck", &wkup_32k_fck),
-       CLK(NULL,       "gpio1_dbck",   &gpio1_dbck),
-       CLK(NULL,       "sha12_ick",    &sha12_ick),
-       CLK(NULL,       "wdt2_fck",             &wdt2_fck),
-       CLK("omap_wdt", "ick",          &wdt2_ick),
-       CLK(NULL,       "wdt2_ick",     &wdt2_ick),
-       CLK(NULL,       "wdt1_ick",     &wdt1_ick),
-       CLK(NULL,       "gpio1_ick",    &gpio1_ick),
-       CLK(NULL,       "omap_32ksync_ick", &omap_32ksync_ick),
-       CLK(NULL,       "gpt12_ick",    &gpt12_ick),
-       CLK(NULL,       "gpt1_ick",     &gpt1_ick),
-       CLK(NULL,       "per_96m_fck",  &per_96m_fck),
-       CLK(NULL,       "per_48m_fck",  &per_48m_fck),
-       CLK(NULL,       "uart3_fck",    &uart3_fck),
-       CLK(NULL,       "gpt2_fck",     &gpt2_fck),
-       CLK(NULL,       "gpt3_fck",     &gpt3_fck),
-       CLK(NULL,       "gpt4_fck",     &gpt4_fck),
-       CLK(NULL,       "gpt5_fck",     &gpt5_fck),
-       CLK(NULL,       "gpt6_fck",     &gpt6_fck),
-       CLK(NULL,       "gpt7_fck",     &gpt7_fck),
-       CLK(NULL,       "gpt8_fck",     &gpt8_fck),
-       CLK(NULL,       "gpt9_fck",     &gpt9_fck),
-       CLK(NULL,       "per_32k_alwon_fck", &per_32k_alwon_fck),
-       CLK(NULL,       "gpio6_dbck",   &gpio6_dbck),
-       CLK(NULL,       "gpio5_dbck",   &gpio5_dbck),
-       CLK(NULL,       "gpio4_dbck",   &gpio4_dbck),
-       CLK(NULL,       "gpio3_dbck",   &gpio3_dbck),
-       CLK(NULL,       "gpio2_dbck",   &gpio2_dbck),
-       CLK(NULL,       "wdt3_fck",     &wdt3_fck),
-       CLK(NULL,       "per_l4_ick",   &per_l4_ick),
-       CLK(NULL,       "gpio6_ick",    &gpio6_ick),
-       CLK(NULL,       "gpio5_ick",    &gpio5_ick),
-       CLK(NULL,       "gpio4_ick",    &gpio4_ick),
-       CLK(NULL,       "gpio3_ick",    &gpio3_ick),
-       CLK(NULL,       "gpio2_ick",    &gpio2_ick),
-       CLK(NULL,       "wdt3_ick",     &wdt3_ick),
-       CLK(NULL,       "uart3_ick",    &uart3_ick),
-       CLK(NULL,       "uart4_ick",    &uart4_ick),
-       CLK(NULL,       "gpt9_ick",     &gpt9_ick),
-       CLK(NULL,       "gpt8_ick",     &gpt8_ick),
-       CLK(NULL,       "gpt7_ick",     &gpt7_ick),
-       CLK(NULL,       "gpt6_ick",     &gpt6_ick),
-       CLK(NULL,       "gpt5_ick",     &gpt5_ick),
-       CLK(NULL,       "gpt4_ick",     &gpt4_ick),
-       CLK(NULL,       "gpt3_ick",     &gpt3_ick),
-       CLK(NULL,       "gpt2_ick",     &gpt2_ick),
-       CLK("omap-mcbsp.2", "ick",      &mcbsp2_ick),
-       CLK("omap-mcbsp.3", "ick",      &mcbsp3_ick),
-       CLK("omap-mcbsp.4", "ick",      &mcbsp4_ick),
-       CLK(NULL,       "mcbsp4_ick",   &mcbsp2_ick),
-       CLK(NULL,       "mcbsp3_ick",   &mcbsp3_ick),
-       CLK(NULL,       "mcbsp2_ick",   &mcbsp4_ick),
-       CLK(NULL,       "mcbsp2_fck",   &mcbsp2_fck),
-       CLK(NULL,       "mcbsp3_fck",   &mcbsp3_fck),
-       CLK(NULL,       "mcbsp4_fck",   &mcbsp4_fck),
-       CLK("etb",      "emu_src_ck",   &emu_src_ck),
-       CLK(NULL,       "emu_src_ck",   &emu_src_ck),
-       CLK(NULL,       "pclk_fck",     &pclk_fck),
-       CLK(NULL,       "pclkx2_fck",   &pclkx2_fck),
-       CLK(NULL,       "atclk_fck",    &atclk_fck),
-       CLK(NULL,       "traceclk_src_fck", &traceclk_src_fck),
-       CLK(NULL,       "traceclk_fck", &traceclk_fck),
-       CLK(NULL,       "secure_32k_fck", &secure_32k_fck),
-       CLK(NULL,       "gpt12_fck",    &gpt12_fck),
-       CLK(NULL,       "wdt1_fck",     &wdt1_fck),
-       CLK(NULL,       "timer_32k_ck", &omap_32k_fck),
-       CLK(NULL,       "timer_sys_ck", &sys_ck),
-       CLK(NULL,       "cpufreq_ck",   &dpll1_ck),
-};
-
-static const char *enable_init_clks[] = {
-       "sdrc_ick",
-       "gpmc_fck",
-       "omapctrl_ick",
-};
-
-int __init omap3xxx_clk_init(void)
-{
-       if (omap3_has_192mhz_clk())
-               omap_96m_alwon_fck = omap_96m_alwon_fck_3630;
-
-       if (cpu_is_omap3630()) {
-               dpll3_m3x2_ck = dpll3_m3x2_ck_3630;
-               dpll4_m2x2_ck = dpll4_m2x2_ck_3630;
-               dpll4_m3x2_ck = dpll4_m3x2_ck_3630;
-               dpll4_m4x2_ck = dpll4_m4x2_ck_3630;
-               dpll4_m5x2_ck = dpll4_m5x2_ck_3630;
-               dpll4_m6x2_ck = dpll4_m6x2_ck_3630;
-       }
-
-       /*
-        * XXX This type of dynamic rewriting of the clock tree is
-        * deprecated and should be revised soon.
-        */
-       if (cpu_is_omap3630())
-               dpll4_dd = dpll4_dd_3630;
-       else
-               dpll4_dd = dpll4_dd_34xx;
-
-
-       /*
-        * 3505 must be tested before 3517, since the 3517 check returns
-        * true for both AM3517 chips and AM3517-family chips, which
-        * include the 3505.  Unfortunately there's no obvious family
-        * test for 3517/3505 :-(
-        */
-       if (soc_is_am35xx()) {
-               cpu_mask = RATE_IN_34XX;
-               omap_clocks_register(am35xx_clks, ARRAY_SIZE(am35xx_clks));
-               omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
-                                    ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
-               omap_clocks_register(omap3xxx_clks, ARRAY_SIZE(omap3xxx_clks));
-       } else if (cpu_is_omap3630()) {
-               cpu_mask = (RATE_IN_34XX | RATE_IN_36XX);
-               omap_clocks_register(omap36xx_clks, ARRAY_SIZE(omap36xx_clks));
-               omap_clocks_register(omap36xx_omap3430es2plus_clks,
-                                    ARRAY_SIZE(omap36xx_omap3430es2plus_clks));
-               omap_clocks_register(omap34xx_omap36xx_clks,
-                                    ARRAY_SIZE(omap34xx_omap36xx_clks));
-               omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
-                                    ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
-               omap_clocks_register(omap3xxx_clks, ARRAY_SIZE(omap3xxx_clks));
-       } else if (cpu_is_omap34xx()) {
-               if (omap_rev() == OMAP3430_REV_ES1_0) {
-                       cpu_mask = RATE_IN_3430ES1;
-                       omap_clocks_register(omap3430es1_clks,
-                                            ARRAY_SIZE(omap3430es1_clks));
-                       omap_clocks_register(omap34xx_omap36xx_clks,
-                                            ARRAY_SIZE(omap34xx_omap36xx_clks));
-                       omap_clocks_register(omap3xxx_clks,
-                                            ARRAY_SIZE(omap3xxx_clks));
-               } else {
-                       /*
-                        * Assume that anything that we haven't matched yet
-                        * has 3430ES2-type clocks.
-                        */
-                       cpu_mask = RATE_IN_3430ES2PLUS;
-                       omap_clocks_register(omap34xx_omap36xx_clks,
-                                            ARRAY_SIZE(omap34xx_omap36xx_clks));
-                       omap_clocks_register(omap36xx_omap3430es2plus_clks,
-                                            ARRAY_SIZE(omap36xx_omap3430es2plus_clks));
-                       omap_clocks_register(omap36xx_am35xx_omap3430es2plus_clks,
-                                            ARRAY_SIZE(omap36xx_am35xx_omap3430es2plus_clks));
-                       omap_clocks_register(omap3xxx_clks,
-                                            ARRAY_SIZE(omap3xxx_clks));
-               }
-       } else {
-               WARN(1, "clock: could not identify OMAP3 variant\n");
-       }
-
-       omap2_clk_disable_autoidle_all();
-
-       omap2_clk_enable_init_clocks(enable_init_clks,
-                                    ARRAY_SIZE(enable_init_clks));
-
-       pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
-               (clk_get_rate(&osc_sys_ck) / 1000000),
-               (clk_get_rate(&osc_sys_ck) / 100000) % 10,
-               (clk_get_rate(&core_ck) / 1000000),
-               (clk_get_rate(&arm_fck) / 1000000));
-
-       /*
-        * Lock DPLL5 -- here only until other device init code can
-        * handle this
-        */
-       if (omap_rev() >= OMAP3430_REV_ES2_0)
-               omap3_clk_lock_dpll5();
-
-       /* Avoid sleeping during omap3_core_dpll_m2_set_rate() */
-       sdrc_ick_p = clk_get(NULL, "sdrc_ick");
-       arm_fck_p = clk_get(NULL, "arm_fck");
-
-       return 0;
-}
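
For context on the clkdev tables deleted above: a consumer driver reaches these clocks through clk_get() using the (dev_id, con_id) pair from the CLK() entries. The snippet below is a minimal, hand-written sketch of that lookup path, assuming a hypothetical probe function and reusing the "omap_hsmmc.0"/"ick" entry from the table; it is not part of this patch, though clk_get(), clk_prepare_enable() and clk_get_rate() are the standard consumer API calls.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Illustrative only: consume the clock registered via CLK("omap_hsmmc.0", "ick", &mmchs1_ick). */
static int example_probe(struct platform_device *pdev)
{
	struct clk *ick;

	/* dev_name(&pdev->dev) == "omap_hsmmc.0" matches the clkdev entry's dev_id. */
	ick = clk_get(&pdev->dev, "ick");
	if (IS_ERR(ick))
		return PTR_ERR(ick);

	clk_prepare_enable(ick);
	dev_info(&pdev->dev, "interface clock running at %lu Hz\n", clk_get_rate(ick));

	return 0;
}
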
index 4ae4ccebced285e0598282028a3ad909fe3ed26c..6124db5c37aebf5d3092b66c355c326bdf6815f9 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/clk-provider.h>
 #include <linux/io.h>
 #include <linux/bitops.h>
-#include <linux/clk-private.h>
 #include <asm/cpu.h>
 
 #include <trace/events/power.h>
@@ -632,21 +631,6 @@ const struct clk_hw_omap_ops clkhwops_wait = {
        .find_companion = omap2_clk_dflt_find_companion,
 };
 
-/**
- * omap_clocks_register - register an array of omap_clk
- * @ocs: pointer to an array of omap_clk to register
- */
-void __init omap_clocks_register(struct omap_clk oclks[], int cnt)
-{
-       struct omap_clk *c;
-
-       for (c = oclks; c < oclks + cnt; c++) {
-               clkdev_add(&c->lk);
-               if (!__clk_init(NULL, c->lk.clk))
-                       omap2_init_clk_hw_omap_clocks(c->lk.clk);
-       }
-}
-
 /**
  * omap2_clk_switch_mpurate_at_boot - switch ARM MPU rate by boot-time argument
  * @mpurate_ck_name: clk name of the clock to change rate
index 1cf9dd85248abbe6511ac8c59133f9f290327de7..a56742f96000a64eb125010903304fb0d7e37414 100644 (file)
@@ -40,23 +40,29 @@ struct omap_clk {
 struct clockdomain;
 
 #define DEFINE_STRUCT_CLK(_name, _parent_array_name, _clkops_name)     \
-       static struct clk _name = {                             \
+       static struct clk_core _name##_core = {                 \
                .name = #_name,                                 \
                .hw = &_name##_hw.hw,                           \
                .parent_names = _parent_array_name,             \
                .num_parents = ARRAY_SIZE(_parent_array_name),  \
                .ops = &_clkops_name,                           \
+       };                                                      \
+       static struct clk _name = {                             \
+               .core = &_name##_core,                          \
        };
 
 #define DEFINE_STRUCT_CLK_FLAGS(_name, _parent_array_name,     \
                                _clkops_name, _flags)           \
-       static struct clk _name = {                             \
+       static struct clk_core _name##_core = {                 \
                .name = #_name,                                 \
                .hw = &_name##_hw.hw,                           \
                .parent_names = _parent_array_name,             \
                .num_parents = ARRAY_SIZE(_parent_array_name),  \
                .ops = &_clkops_name,                           \
                .flags = _flags,                                \
+       };                                                      \
+       static struct clk _name = {                             \
+               .core = &_name##_core,                          \
        };
 
 #define DEFINE_STRUCT_CLK_HW_OMAP(_name, _clkdm_name)          \
@@ -238,7 +244,6 @@ struct ti_clk_features {
 extern struct ti_clk_features ti_clk_features;
 
 extern const struct clkops clkops_omap2_dflt_wait;
-extern const struct clkops clkops_dummy;
 extern const struct clkops clkops_omap2_dflt;
 
 extern struct clk_functions omap2_clk_functions;
@@ -247,7 +252,6 @@ extern const struct clksel_rate gpt_32k_rates[];
 extern const struct clksel_rate gpt_sys_rates[];
 extern const struct clksel_rate gfx_l3_rates[];
 extern const struct clksel_rate dsp_ick_rates[];
-extern struct clk dummy_ck;
 
 extern const struct clk_hw_omap_ops clkhwops_iclk_wait;
 extern const struct clk_hw_omap_ops clkhwops_wait;
@@ -272,7 +276,5 @@ extern void __iomem *clk_memmaps[];
 extern int omap2_clkops_enable_clkdm(struct clk_hw *hw);
 extern void omap2_clkops_disable_clkdm(struct clk_hw *hw);
 
-extern void omap_clocks_register(struct omap_clk *oclks, int cnt);
-
 void __init ti_clk_init_features(void);
 #endif
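
To make the macro change above concrete, here is a hand-expanded sketch of what DEFINE_STRUCT_CLK(sdrc_ick, ipss_ick_parent_names, aes2_ick_ops) -- one of the invocations in the deleted OMAP3 clock data -- produces after this patch. The expansion is written out for illustration from the macro body shown above, not copied from generated code.

/* Hand-expanded for illustration; mirrors the updated DEFINE_STRUCT_CLK macro. */
static struct clk_core sdrc_ick_core = {
	.name		= "sdrc_ick",
	.hw		= &sdrc_ick_hw.hw,
	.parent_names	= ipss_ick_parent_names,
	.num_parents	= ARRAY_SIZE(ipss_ick_parent_names),
	.ops		= &aes2_ick_ops,
};
static struct clk sdrc_ick = {
	.core		= &sdrc_ick_core,
};
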
index ef4d21bfb96478da0b9ef681c931303aa9fb1bf4..61b60dfb14ce8a69e7d316aa17d831c7469d0222 100644 (file)
@@ -16,7 +16,6 @@
  * OMAP3xxx clock definition files.
  */
 
-#include <linux/clk-private.h>
 #include "clock.h"
 
 /* clksel_rate data common to 24xx/343x */
@@ -114,13 +113,3 @@ const struct clksel_rate div31_1to31_rates[] = {
        { .div = 31, .val = 31, .flags = RATE_IN_4430 | RATE_IN_AM33XX },
        { .div = 0 },
 };
-
-/* Clocks shared between various OMAP SoCs */
-
-static struct clk_ops dummy_ck_ops = {};
-
-struct clk dummy_ck = {
-       .name = "dummy_clk",
-       .ops = &dummy_ck_ops,
-       .flags = CLK_IS_BASIC,
-};
index c2da2a0fe5ad64df80d6290f45658c697cfd4c96..44e57ec225d4401c1e2a81fcbfd929e499e65d58 100644 (file)
@@ -410,7 +410,7 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        int r;
        struct dpll_data *dd;
-       struct clk *parent;
+       struct clk_hw *parent;
 
        dd = clk->dpll_data;
        if (!dd)
@@ -427,13 +427,13 @@ int omap3_noncore_dpll_enable(struct clk_hw *hw)
                }
        }
 
-       parent = __clk_get_parent(hw->clk);
+       parent = __clk_get_hw(__clk_get_parent(hw->clk));
 
        if (__clk_get_rate(hw->clk) == __clk_get_rate(dd->clk_bypass)) {
-               WARN_ON(parent != dd->clk_bypass);
+               WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
                r = _omap3_noncore_dpll_bypass(clk);
        } else {
-               WARN_ON(parent != dd->clk_ref);
+               WARN_ON(parent != __clk_get_hw(dd->clk_ref));
                r = _omap3_noncore_dpll_lock(clk);
        }
 
@@ -473,6 +473,8 @@ void omap3_noncore_dpll_disable(struct clk_hw *hw)
  * in failure.
  */
 long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long min_rate,
+                                      unsigned long max_rate,
                                       unsigned long *best_parent_rate,
                                       struct clk_hw **best_parent_clk)
 {
@@ -549,7 +551,8 @@ int omap3_noncore_dpll_set_rate(struct clk_hw *hw, unsigned long rate,
        if (!dd)
                return -EINVAL;
 
-       if (__clk_get_parent(hw->clk) != dd->clk_ref)
+       if (__clk_get_hw(__clk_get_parent(hw->clk)) !=
+           __clk_get_hw(dd->clk_ref))
                return -EINVAL;
 
        if (dd->last_rounded_rate == 0)
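
The recurring change in this file -- comparing a DPLL's current parent against dd->clk_ref or dd->clk_bypass via struct clk_hw pointers instead of struct clk pointers -- can be read as one small pattern. The helper below is an editorial sketch (the function name is hypothetical); __clk_get_hw() and __clk_get_parent() are the existing accessors used in the hunks above.

/* Hypothetical helper sketching the comparison pattern used in the hunks above. */
static bool omap_dpll_parent_is(struct clk_hw *hw, struct clk *candidate)
{
	return __clk_get_hw(__clk_get_parent(hw->clk)) == __clk_get_hw(candidate);
}
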
index fc712240e5fd9d5173daca2df199d46e3ae521d9..f231be05b9a638de8e52cfe03d78765433815764 100644 (file)
@@ -202,6 +202,8 @@ out:
  * in failure.
  */
 long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
 {
index e60780f0537492fb570e646e9bc4897101cab022..c4871c55bd8b641544a86a281121cad4c9219e42 100644 (file)
@@ -461,7 +461,17 @@ void __init omap3_init_early(void)
        omap3xxx_clockdomains_init();
        omap3xxx_hwmod_init();
        omap_hwmod_init_postsetup();
-       omap_clk_soc_init = omap3xxx_clk_init;
+       if (!of_have_populated_dt()) {
+               omap3_prcm_legacy_iomaps_init();
+               if (soc_is_am35xx())
+                       omap_clk_soc_init = am35xx_clk_legacy_init;
+               else if (cpu_is_omap3630())
+                       omap_clk_soc_init = omap36xx_clk_legacy_init;
+               else if (omap_rev() == OMAP3430_REV_ES1_0)
+                       omap_clk_soc_init = omap3430es1_clk_legacy_init;
+               else
+                       omap_clk_soc_init = omap3430_clk_legacy_init;
+       }
 }
 
 void __init omap3430_init_early(void)
@@ -753,15 +763,17 @@ int __init omap_clk_init(void)
 
        ti_clk_init_features();
 
-       ret = of_prcm_init();
-       if (ret)
-               return ret;
+       if (of_have_populated_dt()) {
+               ret = of_prcm_init();
+               if (ret)
+                       return ret;
 
-       of_clk_init(NULL);
+               of_clk_init(NULL);
 
-       ti_dt_clk_init_retry_clks();
+               ti_dt_clk_init_retry_clks();
 
-       ti_dt_clockdomains_setup();
+               ti_dt_clockdomains_setup();
+       }
 
        ret = omap_clk_soc_init();
 
index 2418bdf28ca271599ae108824abe3b6493d0bbf4..cee0fe1ee6ffb0d3e5026a7328458feb34dc2732 100644 (file)
@@ -242,7 +242,7 @@ static int __init omap4_sar_ram_init(void)
 }
 omap_early_initcall(omap4_sar_ram_init);
 
-static struct of_device_id gic_match[] = {
+static const struct of_device_id gic_match[] = {
        { .compatible = "arm,cortex-a9-gic", },
        { .compatible = "arm,cortex-a15-gic", },
        { },
index 77752e49d8d4c666a2f48fb816546ccfbf53b212..b9061a6a2db8998314cf83dc0aa98dab2899ce43 100644 (file)
@@ -20,6 +20,7 @@ extern void __iomem *prm_base;
 extern u16 prm_features;
 extern void omap2_set_globals_prm(void __iomem *prm);
 int of_prcm_init(void);
+void omap3_prcm_legacy_iomaps_init(void);
 # endif
 
 /*
index c5e00c6714b1d99fc5afaabe3f41985e1fce4bad..5713bbdf83bc57ac7314f6e27455e3851772bc09 100644 (file)
@@ -674,7 +674,7 @@ int __init omap3xxx_prm_init(void)
        return prm_register(&omap3xxx_prm_ll_data);
 }
 
-static struct of_device_id omap3_prm_dt_match_table[] = {
+static const struct of_device_id omap3_prm_dt_match_table[] = {
        { .compatible = "ti,omap3-prm" },
        { }
 };
index 408c64efb80700868fa4c8b0138a2763a78bc161..a08a617a6c110365cf20ce9c5df54edef19c20c5 100644 (file)
@@ -712,7 +712,7 @@ int __init omap44xx_prm_init(void)
        return prm_register(&omap44xx_prm_ll_data);
 }
 
-static struct of_device_id omap_prm_dt_match_table[] = {
+static const struct of_device_id omap_prm_dt_match_table[] = {
        { .compatible = "ti,omap4-prm" },
        { .compatible = "ti,omap5-prm" },
        { .compatible = "ti,dra7-prm" },
index 264b5e29404d0eded3c9eca764384e8d93a5e563..bfaa7ba595cc832ec7783e759db4425c5e1e58c0 100644 (file)
@@ -35,6 +35,8 @@
 #include "prm44xx.h"
 #include "common.h"
 #include "clock.h"
+#include "cm.h"
+#include "control.h"
 
 /*
  * OMAP_PRCM_MAX_NR_PENDING_REG: maximum number of PRM_IRQ*_MPU regs
@@ -641,6 +643,15 @@ int __init of_prcm_init(void)
        return 0;
 }
 
+void __init omap3_prcm_legacy_iomaps_init(void)
+{
+       ti_clk_ll_ops = &omap_clk_ll_ops;
+
+       clk_memmaps[TI_CLKM_CM] = cm_base + OMAP3430_IVA2_MOD;
+       clk_memmaps[TI_CLKM_PRM] = prm_base + OMAP3430_IVA2_MOD;
+       clk_memmaps[TI_CLKM_SCRM] = omap_ctrl_base_get();
+}
+
 static int __init prm_late_init(void)
 {
        if (prm_ll_data->late_init)
index a219dc310d5de545527030fe169b88a494c0578a..e03d8b5c9ad0aa174b46c2e54cf2ade518d4cfdc 100644 (file)
@@ -27,7 +27,6 @@ config ARCH_ATLAS7
        select CPU_V7
        select HAVE_ARM_SCU if SMP
        select HAVE_SMP
-       select SMP_ON_UP if SMP
        help
           Support for CSR SiRFSoC ARM Cortex A7 Platform
 
index 0c819bb88418369bab10f336766c9974c89f48dc..8cadb302a7d2f54a3bbcddaf7296606293d69e4e 100644 (file)
@@ -21,7 +21,7 @@ static void __init sirfsoc_init_late(void)
 }
 
 #ifdef CONFIG_ARCH_ATLAS6
-static const char *atlas6_dt_match[] __initconst = {
+static const char *const atlas6_dt_match[] __initconst = {
        "sirf,atlas6",
        NULL
 };
@@ -36,7 +36,7 @@ MACHINE_END
 #endif
 
 #ifdef CONFIG_ARCH_PRIMA2
-static const char *prima2_dt_match[] __initconst = {
+static const char *const prima2_dt_match[] __initconst = {
        "sirf,prima2",
        NULL
 };
@@ -52,7 +52,7 @@ MACHINE_END
 #endif
 
 #ifdef CONFIG_ARCH_ATLAS7
-static const char *atlas7_dt_match[] __initdata = {
+static const char *const atlas7_dt_match[] __initconst = {
        "sirf,atlas7",
        NULL
 };
index fc2b03c81e5f57f2f6be7146afa7022f3f22d476..e46c91094dde3c66065b4d7e040ef7a057d9d04a 100644 (file)
@@ -40,7 +40,7 @@ static void sirfsoc_secondary_init(unsigned int cpu)
        spin_unlock(&boot_lock);
 }
 
-static struct of_device_id clk_ids[]  = {
+static const struct of_device_id clk_ids[]  = {
        { .compatible = "sirf,atlas7-clkc" },
        {},
 };
index 5078932c1683278c45cb57a491483427132679c1..ae4eb7cc4bcc5a1c5dffa8a26f98e5c58b6cf3f8 100644 (file)
@@ -11,6 +11,7 @@ config ARCH_ROCKCHIP
        select HAVE_ARM_SCU if SMP
        select HAVE_ARM_TWD if SMP
        select DW_APB_TIMER_OF
+       select REGULATOR if PM
        select ROCKCHIP_TIMER
        select ARM_GLOBAL_TIMER
        select CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
index 7d752ff39f91f4a7d737e460f3d09e089e1383f6..7c889c04604b5ec0d9faaec30d0b612275c4cda4 100644 (file)
@@ -24,7 +24,13 @@ extern unsigned long rkpm_bootdata_ddr_data;
 extern unsigned long rk3288_bootram_sz;
 
 void rockchip_slp_cpu_resume(void);
+#ifdef CONFIG_PM_SLEEP
 void __init rockchip_suspend_init(void);
+#else
+static inline void rockchip_suspend_init(void)
+{
+}
+#endif
 
 /****** following is rk3288 defined **********/
 #define RK3288_PMU_WAKEUP_CFG0         0x00
index 43eb1eaea0c927f8e24e94f27831d92931172ab1..83e656ea95ae13f1ed23003d6872479370e43abc 100644 (file)
@@ -63,7 +63,7 @@ static void __init s5pv210_dt_init_late(void)
        s5pv210_pm_init();
 }
 
-static char const *s5pv210_dt_compat[] __initconst = {
+static char const *const s5pv210_dt_compat[] __initconst = {
        "samsung,s5pc110",
        "samsung,s5pv210",
        NULL
index aad97be9cbe1b0fabe069136e2e264cc51de8a24..37f7b15c01bc073678b49189c4b9d4a5a9aacd63 100644 (file)
@@ -37,7 +37,7 @@ static void __init emev2_map_io(void)
        iotable_init(emev2_io_desc, ARRAY_SIZE(emev2_io_desc));
 }
 
-static const char *emev2_boards_compat_dt[] __initconst = {
+static const char *const emev2_boards_compat_dt[] __initconst = {
        "renesas,emev2",
        NULL,
 };
index 8825bc9e2553057145a9d2839bf4001e71960bd8..3b1ac463a4947f21f3e82de66d8853a902367fa4 100644 (file)
@@ -13,6 +13,7 @@ menuconfig ARCH_STI
        select ARM_ERRATA_775420
        select PL310_ERRATA_753970 if CACHE_L2X0
        select PL310_ERRATA_769419 if CACHE_L2X0
+       select RESET_CONTROLLER
        help
          Include support for STiH41x SOCs like STiH415/416 using the device tree
          for discovery
index ef016af1c9e769176378e2930f24dc2f060adc09..914341bcef25faf08631113ce98b90228d728d9a 100644 (file)
@@ -91,8 +91,6 @@ static void __init tegra_dt_init(void)
        struct soc_device *soc_dev;
        struct device *parent = NULL;
 
-       tegra_clocks_apply_init_table();
-
        soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
        if (!soc_dev_attr)
                goto out;
index 0d4b5b46f15b551f191bedaa6f0b9394da84dad1..4d71c90f801caf6ac5a6e4d150c7751e6066f2da 100644 (file)
@@ -49,7 +49,7 @@ static struct generic_pm_domain *ux500_pm_domains[NR_DOMAINS] = {
        [DOMAIN_VAPE] = &ux500_pm_domain_vape,
 };
 
-static struct of_device_id ux500_pm_domain_matches[] = {
+static const struct of_device_id ux500_pm_domain_matches[] __initconst = {
        { .compatible = "stericsson,ux500-pm-domains", },
        { },
 };
index 9f9bc61ca64bc6af4ddf2e7bfe7e2d6ccac3fe55..7de3e92a13b0ef8896c56a9101e21ea7a8db6411 100644 (file)
@@ -35,7 +35,7 @@ static void __init versatile_dt_init(void)
                             versatile_auxdata_lookup, NULL);
 }
 
-static const char *versatile_dt_match[] __initconst = {
+static const char *const versatile_dt_match[] __initconst = {
        "arm,versatile-ab",
        "arm,versatile-pb",
        NULL,
index d6b16d9a78380e78ff7d33855f1c6caa557d2e83..3c2509b4b6946bfcfd9b4e7a325b3945ba6c244c 100644 (file)
@@ -73,6 +73,7 @@ config ARCH_VEXPRESS_TC2_PM
        depends on MCPM
        select ARM_CCI
        select ARCH_VEXPRESS_SPC
+       select ARM_CPU_SUSPEND
        help
          Support for CPU and cluster power management on Versatile Express
          with a TC2 (A15x2 A7x3) big.LITTLE core tile.
index c43c714555661337048b72a5a21a6b5357659567..9b4f29e595a423f6d00540a9a169decae0f3b4ea 100644 (file)
@@ -892,13 +892,6 @@ config CACHE_L2X0
 
 if CACHE_L2X0
 
-config CACHE_PL310
-       bool
-       default y if CPU_V7 && !(CPU_V6 || CPU_V6K)
-       help
-         This option enables optimisations for the PL310 cache
-         controller.
-
 config PL310_ERRATA_588369
        bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
        help
index 903dba064a034c7e5d9fff950d3fa334301130d9..170a116d1b298c1befb81efdeaee49735362fd26 100644 (file)
@@ -1106,7 +1106,7 @@ static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
        int i = 0;
 
        if (array_size <= PAGE_SIZE)
-               pages = kzalloc(array_size, gfp);
+               pages = kzalloc(array_size, GFP_KERNEL);
        else
                pages = vzalloc(array_size);
        if (!pages)
diff --git a/arch/blackfin/include/asm/bfin_rotary.h b/arch/blackfin/include/asm/bfin_rotary.h
deleted file mode 100644 (file)
index 8895a75..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * board initialization should put one of these structures into platform_data
- * and place the bfin-rotary onto platform_bus named "bfin-rotary".
- *
- * Copyright 2008-2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#ifndef _BFIN_ROTARY_H
-#define _BFIN_ROTARY_H
-
-/* mode bitmasks */
-#define ROT_QUAD_ENC   CNTMODE_QUADENC /* quadrature/grey code encoder mode */
-#define ROT_BIN_ENC    CNTMODE_BINENC  /* binary encoder mode */
-#define ROT_UD_CNT     CNTMODE_UDCNT   /* rotary counter mode */
-#define ROT_DIR_CNT    CNTMODE_DIRCNT  /* direction counter mode */
-
-#define ROT_DEBE       DEBE            /* Debounce Enable */
-
-#define ROT_CDGINV     CDGINV          /* CDG Pin Polarity Invert */
-#define ROT_CUDINV     CUDINV          /* CUD Pin Polarity Invert */
-#define ROT_CZMINV     CZMINV          /* CZM Pin Polarity Invert */
-
-struct bfin_rotary_platform_data {
-       /* set rotary UP KEY_### or BTN_### in case you prefer
-        * bfin-rotary to send EV_KEY otherwise set 0
-        */
-       unsigned int rotary_up_key;
-       /* set rotary DOWN KEY_### or BTN_### in case you prefer
-        * bfin-rotary to send EV_KEY otherwise set 0
-        */
-       unsigned int rotary_down_key;
-       /* set rotary BUTTON KEY_### or BTN_### */
-       unsigned int rotary_button_key;
-       /* set rotary Relative Axis REL_### in case you prefer
-        * bfin-rotary to send EV_REL otherwise set 0
-        */
-       unsigned int rotary_rel_code;
-       unsigned short debounce;        /* 0..17 */
-       unsigned short mode;
-       unsigned short pm_wakeup;
-};
-
-/* CNT_CONFIG bitmasks */
-#define CNTE           (1 << 0)        /* Counter Enable */
-#define DEBE           (1 << 1)        /* Debounce Enable */
-#define CDGINV         (1 << 4)        /* CDG Pin Polarity Invert */
-#define CUDINV         (1 << 5)        /* CUD Pin Polarity Invert */
-#define CZMINV         (1 << 6)        /* CZM Pin Polarity Invert */
-#define CNTMODE_SHIFT  8
-#define CNTMODE                (0x7 << CNTMODE_SHIFT)  /* Counter Operating Mode */
-#define ZMZC           (1 << 1)        /* CZM Zeroes Counter Enable */
-#define BNDMODE_SHIFT  12
-#define BNDMODE                (0x3 << BNDMODE_SHIFT)  /* Boundary register Mode */
-#define INPDIS         (1 << 15)       /* CUG and CDG Input Disable */
-
-#define CNTMODE_QUADENC        (0 << CNTMODE_SHIFT)    /* quadrature encoder mode */
-#define CNTMODE_BINENC (1 << CNTMODE_SHIFT)    /* binary encoder mode */
-#define CNTMODE_UDCNT  (2 << CNTMODE_SHIFT)    /* up/down counter mode */
-#define CNTMODE_DIRCNT (4 << CNTMODE_SHIFT)    /* direction counter mode */
-#define CNTMODE_DIRTMR (5 << CNTMODE_SHIFT)    /* direction timer mode */
-
-#define BNDMODE_COMP   (0 << BNDMODE_SHIFT)    /* boundary compare mode */
-#define BNDMODE_ZERO   (1 << BNDMODE_SHIFT)    /* boundary compare and zero mode */
-#define BNDMODE_CAPT   (2 << BNDMODE_SHIFT)    /* boundary capture mode */
-#define BNDMODE_AEXT   (3 << BNDMODE_SHIFT)    /* boundary auto-extend mode */
-
-/* CNT_IMASK bitmasks */
-#define ICIE           (1 << 0)        /* Illegal Gray/Binary Code Interrupt Enable */
-#define UCIE           (1 << 1)        /* Up count Interrupt Enable */
-#define DCIE           (1 << 2)        /* Down count Interrupt Enable */
-#define MINCIE         (1 << 3)        /* Min Count Interrupt Enable */
-#define MAXCIE         (1 << 4)        /* Max Count Interrupt Enable */
-#define COV31IE                (1 << 5)        /* Bit 31 Overflow Interrupt Enable */
-#define COV15IE                (1 << 6)        /* Bit 15 Overflow Interrupt Enable */
-#define CZEROIE                (1 << 7)        /* Count to Zero Interrupt Enable */
-#define CZMIE          (1 << 8)        /* CZM Pin Interrupt Enable */
-#define CZMEIE         (1 << 9)        /* CZM Error Interrupt Enable */
-#define CZMZIE         (1 << 10)       /* CZM Zeroes Counter Interrupt Enable */
-
-/* CNT_STATUS bitmasks */
-#define ICII           (1 << 0)        /* Illegal Gray/Binary Code Interrupt Identifier */
-#define UCII           (1 << 1)        /* Up count Interrupt Identifier */
-#define DCII           (1 << 2)        /* Down count Interrupt Identifier */
-#define MINCII         (1 << 3)        /* Min Count Interrupt Identifier */
-#define MAXCII         (1 << 4)        /* Max Count Interrupt Identifier */
-#define COV31II                (1 << 5)        /* Bit 31 Overflow Interrupt Identifier */
-#define COV15II                (1 << 6)        /* Bit 15 Overflow Interrupt Identifier */
-#define CZEROII                (1 << 7)        /* Count to Zero Interrupt Identifier */
-#define CZMII          (1 << 8)        /* CZM Pin Interrupt Identifier */
-#define CZMEII         (1 << 9)        /* CZM Error Interrupt Identifier */
-#define CZMZII         (1 << 10)       /* CZM Zeroes Counter Interrupt Identifier */
-
-/* CNT_COMMAND bitmasks */
-#define W1LCNT         0xf             /* Load Counter Register */
-#define W1LMIN         0xf0            /* Load Min Register */
-#define W1LMAX         0xf00           /* Load Max Register */
-#define W1ZMONCE       (1 << 12)       /* Enable CZM Clear Counter Once */
-
-#define W1LCNT_ZERO    (1 << 0)        /* write 1 to load CNT_COUNTER with zero */
-#define W1LCNT_MIN     (1 << 2)        /* write 1 to load CNT_COUNTER from CNT_MIN */
-#define W1LCNT_MAX     (1 << 3)        /* write 1 to load CNT_COUNTER from CNT_MAX */
-
-#define W1LMIN_ZERO    (1 << 4)        /* write 1 to load CNT_MIN with zero */
-#define W1LMIN_CNT     (1 << 5)        /* write 1 to load CNT_MIN from CNT_COUNTER */
-#define W1LMIN_MAX     (1 << 7)        /* write 1 to load CNT_MIN from CNT_MAX */
-
-#define W1LMAX_ZERO    (1 << 8)        /* write 1 to load CNT_MAX with zero */
-#define W1LMAX_CNT     (1 << 9)        /* write 1 to load CNT_MAX from CNT_COUNTER */
-#define W1LMAX_MIN     (1 << 10)       /* write 1 to load CNT_MAX from CNT_MIN */
-
-/* CNT_DEBOUNCE bitmasks */
-#define DPRESCALE      0xf             /* Load Counter Register */
-
-#endif
index 9501bd8d9cd193e94f5a7a1896fb3ab8b1cb27fe..68f2a8a806ead46ab1567d0c724c5109378da264 100644 (file)
@@ -666,7 +666,14 @@ static struct platform_device bfin_sport1_uart_device = {
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
+
+static const u16 per_cnt[] = {
+       P_CNT_CUD,
+       P_CNT_CDG,
+       P_CNT_CZM,
+       0
+};
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
        /*.rotary_up_key     = KEY_UP,*/
@@ -676,9 +683,15 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
        .debounce          = 10,        /* 0..17 */
        .mode              = ROT_QUAD_ENC | ROT_DEBE,
        .pm_wakeup         = 1,
+       .pin_list          = per_cnt,
 };
 
 static struct resource bfin_rotary_resources[] = {
+       {
+               .start = CNT_CONFIG,
+               .end   = CNT_CONFIG + 0xff,
+               .flags = IORESOURCE_MEM,
+       },
        {
                .start = IRQ_CNT,
                .end = IRQ_CNT,
index d64f565dc2a0aaafcbaf53af5f33b1738d7ef2f7..d4219e8e5ab865fb196d4689ed315bdf2d255e85 100644 (file)
@@ -1092,7 +1092,14 @@ static struct platform_device bfin_device_gpiokeys = {
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
+
+static const u16 per_cnt[] = {
+       P_CNT_CUD,
+       P_CNT_CDG,
+       P_CNT_CZM,
+       0
+};
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
        /*.rotary_up_key     = KEY_UP,*/
@@ -1102,9 +1109,15 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
        .debounce          = 10,        /* 0..17 */
        .mode              = ROT_QUAD_ENC | ROT_DEBE,
        .pm_wakeup         = 1,
+       .pin_list          = per_cnt,
 };
 
 static struct resource bfin_rotary_resources[] = {
+       {
+               .start = CNT_CONFIG,
+               .end   = CNT_CONFIG + 0xff,
+               .flags = IORESOURCE_MEM,
+       },
        {
                .start = IRQ_CNT,
                .end = IRQ_CNT,
index 1fe7ff286619f693c113faeac592ce3167d8de72..4204b9842532134e7a657bd0d7f9b5c8f1aeb7d7 100644 (file)
@@ -159,7 +159,7 @@ static struct platform_device bf54x_kpad_device = {
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
        /*.rotary_up_key     = KEY_UP,*/
@@ -172,6 +172,11 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
 };
 
 static struct resource bfin_rotary_resources[] = {
+       {
+               .start = CNT_CONFIG,
+               .end   = CNT_CONFIG + 0xff,
+               .flags = IORESOURCE_MEM,
+       },
        {
                .start = IRQ_CNT,
                .end = IRQ_CNT,
index e2c0b024ce88f2593f4551b711b3821d8ae3879e..7f9fc272ec30576827ad62922afb0ff799ab89c3 100644 (file)
@@ -75,7 +75,7 @@ static struct platform_device bfin_isp1760_device = {
 #endif
 
 #if IS_ENABLED(CONFIG_INPUT_BFIN_ROTARY)
-#include <asm/bfin_rotary.h>
+#include <linux/platform_data/bfin_rotary.h>
 
 static struct bfin_rotary_platform_data bfin_rotary_data = {
        /*.rotary_up_key     = KEY_UP,*/
@@ -87,6 +87,11 @@ static struct bfin_rotary_platform_data bfin_rotary_data = {
 };
 
 static struct resource bfin_rotary_resources[] = {
+       {
+               .start = CNT_CONFIG,
+               .end   = CNT_CONFIG + 0xff,
+               .flags = IORESOURCE_MEM,
+       },
        {
                .start = IRQ_CNT,
                .end = IRQ_CNT,
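Each of these board files gains a memory resource covering the counter register block alongside the existing interrupt resource, matching the driver's switch to platform data and ioremapped registers. A rough sketch of how such resources hang off the rotary platform device; the base address and IRQ number are placeholders, not values from any real board:

#include <linux/platform_device.h>
#include <linux/ioport.h>

static struct resource rotary_resources[] = {
	{
		.start = 0x1000,		/* counter MMIO base (placeholder) */
		.end   = 0x1000 + 0xff,
		.flags = IORESOURCE_MEM,
	},
	{
		.start = 42,			/* counter IRQ (placeholder) */
		.end   = 42,
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device rotary_device = {
	.name		= "bfin-rotary",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(rotary_resources),
	.resource	= rotary_resources,
	.dev		= {
		.platform_data	= &bfin_rotary_data,	/* as in the hunks above */
	},
};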
index 843713c05b79fe69f8bbc057a17a5e200dfc54da..c7a16904cd03c705333f645419ec07888fc6fd87 100644 (file)
@@ -54,6 +54,7 @@ config MIPS
        select CPU_PM if CPU_IDLE
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_BINFMT_ELF_STATE
+       select SYSCTL_EXCEPTION_TRACE
 
 menu "Machine selection"
 
@@ -376,8 +377,10 @@ config MIPS_MALTA
        select SYS_HAS_CPU_MIPS32_R1
        select SYS_HAS_CPU_MIPS32_R2
        select SYS_HAS_CPU_MIPS32_R3_5
+       select SYS_HAS_CPU_MIPS32_R6
        select SYS_HAS_CPU_MIPS64_R1
        select SYS_HAS_CPU_MIPS64_R2
+       select SYS_HAS_CPU_MIPS64_R6
        select SYS_HAS_CPU_NEVADA
        select SYS_HAS_CPU_RM7000
        select SYS_SUPPORTS_32BIT_KERNEL
@@ -1033,6 +1036,9 @@ config MIPS_MACHINE
 config NO_IOPORT_MAP
        def_bool n
 
+config GENERIC_CSUM
+       bool
+
 config GENERIC_ISA_DMA
        bool
        select ZONE_DMA if GENERIC_ISA_DMA_SUPPORT_BROKEN=n
@@ -1146,6 +1152,9 @@ config SOC_PNX8335
        bool
        select SOC_PNX833X
 
+config MIPS_SPRAM
+       bool
+
 config SWAP_IO_SPACE
        bool
 
@@ -1304,6 +1313,22 @@ config CPU_MIPS32_R2
          specific type of processor in your system, choose those that one
          otherwise CPU_MIPS32_R1 is a safe bet for any MIPS32 system.
 
+config CPU_MIPS32_R6
+       bool "MIPS32 Release 6 (EXPERIMENTAL)"
+       depends on SYS_HAS_CPU_MIPS32_R6
+       select CPU_HAS_PREFETCH
+       select CPU_SUPPORTS_32BIT_KERNEL
+       select CPU_SUPPORTS_HIGHMEM
+       select CPU_SUPPORTS_MSA
+       select GENERIC_CSUM
+       select HAVE_KVM
+       select MIPS_O32_FP64_SUPPORT
+       help
+         Choose this option to build a kernel for release 6 or later of the
+         MIPS32 architecture.  New MIPS processors, starting with the Warrior
+         family, are based on a MIPS32r6 processor. If you own an older
+         processor, you probably need to select MIPS32r1 or MIPS32r2 instead.
+
 config CPU_MIPS64_R1
        bool "MIPS64 Release 1"
        depends on SYS_HAS_CPU_MIPS64_R1
@@ -1339,6 +1364,21 @@ config CPU_MIPS64_R2
          specific type of processor in your system, choose those that one
          otherwise CPU_MIPS64_R1 is a safe bet for any MIPS64 system.
 
+config CPU_MIPS64_R6
+       bool "MIPS64 Release 6 (EXPERIMENTAL)"
+       depends on SYS_HAS_CPU_MIPS64_R6
+       select CPU_HAS_PREFETCH
+       select CPU_SUPPORTS_32BIT_KERNEL
+       select CPU_SUPPORTS_64BIT_KERNEL
+       select CPU_SUPPORTS_HIGHMEM
+       select CPU_SUPPORTS_MSA
+       select GENERIC_CSUM
+       help
+         Choose this option to build a kernel for release 6 or later of the
+         MIPS64 architecture.  New MIPS processors, starting with the Warrior
+         family, are based on a MIPS64r6 processor. If you own an older
+         processor, you probably need to select MIPS64r1 or MIPS64r2 instead.
+
 config CPU_R3000
        bool "R3000"
        depends on SYS_HAS_CPU_R3000
@@ -1539,7 +1579,7 @@ endchoice
 config CPU_MIPS32_3_5_FEATURES
        bool "MIPS32 Release 3.5 Features"
        depends on SYS_HAS_CPU_MIPS32_R3_5
-       depends on CPU_MIPS32_R2
+       depends on CPU_MIPS32_R2 || CPU_MIPS32_R6
        help
          Choose this option to build a kernel for release 2 or later of the
          MIPS32 architecture including features from the 3.5 release such as
@@ -1659,12 +1699,18 @@ config SYS_HAS_CPU_MIPS32_R2
 config SYS_HAS_CPU_MIPS32_R3_5
        bool
 
+config SYS_HAS_CPU_MIPS32_R6
+       bool
+
 config SYS_HAS_CPU_MIPS64_R1
        bool
 
 config SYS_HAS_CPU_MIPS64_R2
        bool
 
+config SYS_HAS_CPU_MIPS64_R6
+       bool
+
 config SYS_HAS_CPU_R3000
        bool
 
@@ -1764,11 +1810,11 @@ endmenu
 #
 config CPU_MIPS32
        bool
-       default y if CPU_MIPS32_R1 || CPU_MIPS32_R2
+       default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 || CPU_MIPS32_R6
 
 config CPU_MIPS64
        bool
-       default y if CPU_MIPS64_R1 || CPU_MIPS64_R2
+       default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 || CPU_MIPS64_R6
 
 #
 # These two indicate the revision of the architecture, either Release 1 or Release 2
@@ -1780,6 +1826,12 @@ config CPU_MIPSR1
 config CPU_MIPSR2
        bool
        default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
+       select MIPS_SPRAM
+
+config CPU_MIPSR6
+       bool
+       default y if CPU_MIPS32_R6 || CPU_MIPS64_R6
+       select MIPS_SPRAM
 
 config EVA
        bool
@@ -2013,6 +2065,19 @@ config MIPS_MT_FPAFF
        default y
        depends on MIPS_MT_SMP
 
+config MIPSR2_TO_R6_EMULATOR
+       bool "MIPS R2-to-R6 emulator"
+       depends on CPU_MIPSR6 && !SMP
+       default y
+       help
+         Choose this option if you want to run non-R6 MIPS userland code.
+         Even if you say 'Y' here, the emulator will still be disabled by
+         default. You can enable it using the 'mipsr2emul' kernel option.
+         The only reason this is a build-time option is to save ~14K from the
+         final kernel image.
+comment "MIPS R2-to-R6 emulator is only available for UP kernels"
+       depends on SMP && CPU_MIPSR6
+
 config MIPS_VPE_LOADER
        bool "VPE loader support."
        depends on SYS_SUPPORTS_MULTITHREADING && MODULES
@@ -2148,7 +2213,7 @@ config CPU_HAS_SMARTMIPS
          here.
 
 config CPU_MICROMIPS
-       depends on 32BIT && SYS_SUPPORTS_MICROMIPS
+       depends on 32BIT && SYS_SUPPORTS_MICROMIPS && !CPU_MIPSR6
        bool "microMIPS"
        help
          When this option is enabled the kernel will be built using the
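The new MIPSR2_TO_R6_EMULATOR option notes that the emulator stays disabled until the 'mipsr2emul' kernel parameter is passed. A generic sketch of how such a boot-time switch is commonly wired up with early_param(); this is illustrative only, not the actual arch/mips implementation:

#include <linux/init.h>

static int mipsr2_emul_enabled;

static int __init mipsr2emul_setup(char *str)
{
	mipsr2_emul_enabled = 1;	/* "mipsr2emul" seen on the command line */
	return 0;
}
early_param("mipsr2emul", mipsr2emul_setup);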
index 88a9f433f6fc3ca7affc6c9a011622d26e10ada9..3a2b775e845893513e2ab187ca95955e0be17848 100644 (file)
@@ -122,17 +122,4 @@ config SPINLOCK_TEST
        help
          Add several files to the debugfs to test spinlock speed.
 
-config FP32XX_HYBRID_FPRS
-       bool "Run FP32 & FPXX code with hybrid FPRs"
-       depends on MIPS_O32_FP64_SUPPORT
-       help
-         The hybrid FPR scheme is normally used only when a program needs to
-         execute a mix of FP32 & FP64A code, since the trapping & emulation
-         that it entails is expensive. When enabled, this option will lead
-         to the kernel running programs which use the FP32 & FPXX FP ABIs
-         using the hybrid FPR scheme, which can be useful for debugging
-         purposes.
-
-         If unsure, say N.
-
 endmenu
index 2563a088d3b867037fa4f46332ca010f6984547d..8f57fc72d62c8334d35e91ca48f1aa6ead7d08d4 100644 (file)
@@ -122,26 +122,8 @@ predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__
 cflags-$(CONFIG_CPU_BIG_ENDIAN)                += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be))
 cflags-$(CONFIG_CPU_LITTLE_ENDIAN)     += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le))
 
-# For smartmips configurations, there are hundreds of warnings due to ISA overrides
-# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
-# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
-# similar directives in the kernel will spam the build logs with the following warnings:
-# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
-# or
-# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
-# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
-# been fixed properly.
-cflags-$(CONFIG_CPU_HAS_SMARTMIPS)     += $(call cc-option,-msmartmips) -Wa,--no-warn
-cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips)
-
 cflags-$(CONFIG_SB1XXX_CORELIS)        += $(call cc-option,-mno-sched-prolog) \
                                   -fno-omit-frame-pointer
-
-ifeq ($(CONFIG_CPU_HAS_MSA),y)
-toolchain-msa  := $(call cc-option-yn,-mhard-float -mfp64 -Wa$(comma)-mmsa)
-cflags-$(toolchain-msa)                += -DTOOLCHAIN_SUPPORTS_MSA
-endif
-
 #
 # CPU-dependent compiler/assembler options for optimization.
 #
@@ -156,10 +138,12 @@ cflags-$(CONFIG_CPU_MIPS32_R1)    += $(call cc-option,-march=mips32,-mips32 -U_MIPS
                        -Wa,-mips32 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS32_R2) += $(call cc-option,-march=mips32r2,-mips32r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS32) \
                        -Wa,-mips32r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS32_R6) += -march=mips32r6 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R1) += $(call cc-option,-march=mips64,-mips64 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
                        -Wa,-mips64 -Wa,--trap
 cflags-$(CONFIG_CPU_MIPS64_R2) += $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
                        -Wa,-mips64r2 -Wa,--trap
+cflags-$(CONFIG_CPU_MIPS64_R6) += -march=mips64r6 -Wa,--trap
 cflags-$(CONFIG_CPU_R5000)     += -march=r5000 -Wa,--trap
 cflags-$(CONFIG_CPU_R5432)     += $(call cc-option,-march=r5400,-march=r5000) \
                        -Wa,--trap
@@ -182,6 +166,16 @@ cflags-$(CONFIG_CPU_CAVIUM_OCTEON) += -Wa,-march=octeon
 endif
 cflags-$(CONFIG_CAVIUM_CN63XXP1) += -Wa,-mfix-cn63xxp1
 cflags-$(CONFIG_CPU_BMIPS)     += -march=mips32 -Wa,-mips32 -Wa,--trap
+#
+# binutils from v2.25 on and gcc starting from v4.9.0 treat -march=loongson3a
+# as MIPS64 R2; older versions as just R1.  This leaves the possibility open
+# that GCC might generate R2 code for -march=loongson3a which then is rejected
+# by GAS.  The cc-option can't probe for this behaviour so -march=loongson3a
+# can't easily be used safely within the kbuild framework.
+#
+cflags-$(CONFIG_CPU_LOONGSON3)  +=                                     \
+       $(call cc-option,-march=mips64r2,-mips64r2 -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64) \
+       -Wa,-mips64r2 -Wa,--trap
 
 cflags-$(CONFIG_CPU_R4000_WORKAROUNDS) += $(call cc-option,-mfix-r4000,)
 cflags-$(CONFIG_CPU_R4400_WORKAROUNDS) += $(call cc-option,-mfix-r4400,)
@@ -194,6 +188,23 @@ KBUILD_CFLAGS_MODULE += -msb1-pass1-workarounds
 endif
 endif
 
+# For smartmips configurations, there are hundreds of warnings due to ISA overrides
+# in assembly and header files. smartmips is only supported for MIPS32r1 onwards
+# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or
+# similar directives in the kernel will spam the build logs with the following warnings:
+# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater
+# or
+# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension
+# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has
+# been fixed properly.
+mips-cflags                            := "$(cflags-y)"
+cflags-$(CONFIG_CPU_HAS_SMARTMIPS)     += $(call cc-option,$(mips-cflags),-msmartmips) -Wa,--no-warn
+cflags-$(CONFIG_CPU_MICROMIPS)         += $(call cc-option,$(mips-cflags),-mmicromips)
+ifeq ($(CONFIG_CPU_HAS_MSA),y)
+toolchain-msa                          := $(call cc-option-yn,-$(mips-cflags),mhard-float -mfp64 -Wa$(comma)-mmsa)
+cflags-$(toolchain-msa)                        += -DTOOLCHAIN_SUPPORTS_MSA
+endif
+
 #
 # Firmware support
 #
@@ -287,7 +298,11 @@ boot-y                     += vmlinux.ecoff
 boot-y                 += vmlinux.srec
 ifeq ($(shell expr $(load-y) \< 0xffffffff80000000 2> /dev/null), 0)
 boot-y                 += uImage
+boot-y                 += uImage.bin
+boot-y                 += uImage.bz2
 boot-y                 += uImage.gz
+boot-y                 += uImage.lzma
+boot-y                 += uImage.lzo
 endif
 
 # compressed boot image targets (arch/mips/boot/compressed/)
@@ -386,7 +401,11 @@ define archhelp
        echo '  vmlinuz.bin          - Raw binary zboot image'
        echo '  vmlinuz.srec         - SREC zboot image'
        echo '  uImage               - U-Boot image'
+       echo '  uImage.bin           - U-Boot image (uncompressed)'
+       echo '  uImage.bz2           - U-Boot image (bz2)'
        echo '  uImage.gz            - U-Boot image (gzip)'
+       echo '  uImage.lzma          - U-Boot image (lzma)'
+       echo '  uImage.lzo           - U-Boot image (lzo)'
        echo '  dtbs                 - Device-tree blobs for enabled boards'
        echo
        echo '  These will be default as appropriate for a configured platform.'
index 48a9dfc55b51aa4a3819bae6c686b0cd5f38a7f0..6a98d2cb402ccb9c69ad505d819ed3f82f6e4932 100644 (file)
@@ -127,12 +127,20 @@ static unsigned long alchemy_clk_cpu_recalc(struct clk_hw *hw,
                t = 396000000;
        else {
                t = alchemy_rdsys(AU1000_SYS_CPUPLL) & 0x7f;
+               if (alchemy_get_cputype() < ALCHEMY_CPU_AU1300)
+                       t &= 0x3f;
                t *= parent_rate;
        }
 
        return t;
 }
 
+void __init alchemy_set_lpj(void)
+{
+       preset_lpj = alchemy_clk_cpu_recalc(NULL, ALCHEMY_ROOTCLK_RATE);
+       preset_lpj /= 2 * HZ;
+}
+
 static struct clk_ops alchemy_clkops_cpu = {
        .recalc_rate    = alchemy_clk_cpu_recalc,
 };
@@ -315,17 +323,26 @@ static struct clk __init *alchemy_clk_setup_mem(const char *pn, int ct)
 
 /* lrclk: external synchronous static bus clock ***********************/
 
-static struct clk __init *alchemy_clk_setup_lrclk(const char *pn)
+static struct clk __init *alchemy_clk_setup_lrclk(const char *pn, int t)
 {
-       /* MEM_STCFG0[15:13] = divisor.
+       /* Au1000, Au1500: MEM_STCFG0[11]: If bit is set, lrclk=pclk/5,
+        * otherwise lrclk=pclk/4.
+        * All other variants: MEM_STCFG0[15:13] = divisor.
         * L/RCLK = periph_clk / (divisor + 1)
         * On Au1000, Au1500, Au1100 it's called LCLK,
         * on later models it's called RCLK, but it's the same thing.
         */
        struct clk *c;
-       unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0) >> 13;
+       unsigned long v = alchemy_rdsmem(AU1000_MEM_STCFG0);
 
-       v = (v & 7) + 1;
+       switch (t) {
+       case ALCHEMY_CPU_AU1000:
+       case ALCHEMY_CPU_AU1500:
+               v = 4 + ((v >> 11) & 1);
+               break;
+       default:        /* all other models */
+               v = ((v >> 13) & 7) + 1;
+       }
        c = clk_register_fixed_factor(NULL, ALCHEMY_LR_CLK,
                                      pn, 0, 1, v);
        if (!IS_ERR(c))
@@ -546,6 +563,8 @@ static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
 }
 
 static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
 {
@@ -678,6 +697,8 @@ static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
 }
 
 static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
 {
@@ -897,6 +918,8 @@ static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
 }
 
 static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
 {
@@ -1060,7 +1083,7 @@ static int __init alchemy_clk_init(void)
        ERRCK(c)
 
        /* L/RCLK: external static bus clock for synchronous mode */
-       c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK);
+       c = alchemy_clk_setup_lrclk(ALCHEMY_PERIPH_CLK, ctype);
        ERRCK(c)
 
        /* Frequency dividers 0-5 */
index 4e72daf12c325063a62da8c9255cee504a4e5fd4..2902138b3e0f56f639896c571e5bc87172f74d2f 100644 (file)
 #include <au1000.h>
 
 extern void __init board_setup(void);
-extern void set_cpuspec(void);
+extern void __init alchemy_set_lpj(void);
 
 void __init plat_mem_setup(void)
 {
+       alchemy_set_lpj();
+
        if (au1xxx_cpu_needs_config_od())
                /* Various early Au1xx0 errata corrected by this */
                set_c0_config(1 << 19); /* Set Config[OD] */
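plat_mem_setup() now calls alchemy_set_lpj(), which presets loops_per_jiffy from the recalculated CPU clock so the generic boot-time delay-loop calibration can be skipped. The arithmetic boils down to the sketch below, assuming roughly two CPU cycles per delay-loop iteration; cpu_hz stands in for the rate returned by alchemy_clk_cpu_recalc():

#include <linux/kernel.h>
#include <linux/init.h>

extern unsigned long preset_lpj;	/* consumed by calibrate_delay() */

static void __init preset_delay_loop(unsigned long cpu_hz)
{
	/* Loops per second is about cpu_hz / 2; divide by HZ for loops
	 * per jiffy, the same formula alchemy_set_lpj() uses. */
	preset_lpj = cpu_hz / (2 * HZ);
}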
index 0fb5134fb83247a2395162ffc3b93597cc7d7614..fd94fe849af680f3edc24a3dcb184c308fa0613e 100644 (file)
@@ -180,7 +180,7 @@ static int __init intc_of_init(struct device_node *node,
 
 static struct of_device_id of_irq_ids[] __initdata = {
        { .compatible = "mti,cpu-interrupt-controller",
-         .data = mips_cpu_intc_init },
+         .data = mips_cpu_irq_of_init },
        { .compatible = "brcm,bcm3384-intc",
          .data = intc_of_init },
        {},
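The fix points the "mti,cpu-interrupt-controller" entry at mips_cpu_irq_of_init() rather than a stale symbol; of_irq_init() calls whatever function the matching .data field holds, so a wrong callback here means the CPU interrupt controller never gets set up. A minimal sketch of that flow; board_init_irq() is a made-up caller, and the asm/irq_cpu.h include for the prototype is an assumption:

#include <linux/init.h>
#include <linux/of_irq.h>
#include <asm/irq_cpu.h>		/* mips_cpu_irq_of_init() prototype */

static struct of_device_id irq_ids[] __initdata = {
	{ .compatible = "mti,cpu-interrupt-controller",
	  .data = mips_cpu_irq_of_init },
	{ },
};

void __init board_init_irq(void)
{
	/* Walks the device tree and invokes the .data callback for every
	 * matching interrupt-controller node, parents before children. */
	of_irq_init(irq_ids);
}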
index 1466c00260936c7e387877c8c9e296d430cd6240..acb1988f354edc58072399a076656c0f2ffd149e 100644 (file)
@@ -23,6 +23,12 @@ strip-flags   := $(addprefix --remove-section=,$(drop-sections))
 
 hostprogs-y := elf2ecoff
 
+suffix-y                       := bin
+suffix-$(CONFIG_KERNEL_BZIP2)  := bz2
+suffix-$(CONFIG_KERNEL_GZIP)   := gz
+suffix-$(CONFIG_KERNEL_LZMA)   := lzma
+suffix-$(CONFIG_KERNEL_LZO)    := lzo
+
 targets := vmlinux.ecoff
 quiet_cmd_ecoff = ECOFF          $@
       cmd_ecoff = $(obj)/elf2ecoff $(VMLINUX) $@ $(e2eflag)
@@ -44,14 +50,53 @@ $(obj)/vmlinux.srec: $(VMLINUX) FORCE
 UIMAGE_LOADADDR  = $(VMLINUX_LOAD_ADDRESS)
 UIMAGE_ENTRYADDR = $(VMLINUX_ENTRY_ADDRESS)
 
+#
+# Compressed vmlinux images
+#
+
+extra-y += vmlinux.bin.bz2
+extra-y += vmlinux.bin.gz
+extra-y += vmlinux.bin.lzma
+extra-y += vmlinux.bin.lzo
+
+$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,bzip2)
+
 $(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
        $(call if_changed,gzip)
 
+$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,lzma)
+
+$(obj)/vmlinux.bin.lzo: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,lzo)
+
+#
+# Compressed u-boot images
+#
+
+targets += uImage
+targets += uImage.bin
+targets += uImage.bz2
 targets += uImage.gz
+targets += uImage.lzma
+targets += uImage.lzo
+
+$(obj)/uImage.bin: $(obj)/vmlinux.bin FORCE
+       $(call if_changed,uimage,none)
+
+$(obj)/uImage.bz2: $(obj)/vmlinux.bin.bz2 FORCE
+       $(call if_changed,uimage,bzip2)
+
 $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
        $(call if_changed,uimage,gzip)
 
-targets += uImage
-$(obj)/uImage: $(obj)/uImage.gz FORCE
+$(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
+       $(call if_changed,uimage,lzma)
+
+$(obj)/uImage.lzo: $(obj)/vmlinux.bin.lzo FORCE
+       $(call if_changed,uimage,lzo)
+
+$(obj)/uImage: $(obj)/uImage.$(suffix-y)
        @ln -sf $(notdir $<) $@
        @echo '  Image $@ is ready'
index 2a4c52e27f416e146e5c268edad9fd867e79c5fe..266c8137e859d418faed5e6fa3a8549b0aeff9df 100644 (file)
@@ -268,7 +268,6 @@ int main(int argc, char *argv[])
        Elf32_Ehdr ex;
        Elf32_Phdr *ph;
        Elf32_Shdr *sh;
-       char *shstrtab;
        int i, pad;
        struct sect text, data, bss;
        struct filehdr efh;
@@ -336,9 +335,6 @@ int main(int argc, char *argv[])
                                     "sh");
        if (must_convert_endian)
                convert_elf_shdrs(sh, ex.e_shnum);
-       /* Read in the section string table. */
-       shstrtab = saveRead(infile, sh[ex.e_shstrndx].sh_offset,
-                           sh[ex.e_shstrndx].sh_size, "shstrtab");
 
        /* Figure out if we can cram the program header into an ECOFF
           header...  Basically, we can't handle anything but loadable
index b752c4ed0b797938c6ff58e7e1583982cdbdd5ac..1882e6475dd093d546c70cf9f7345f8121589921 100644 (file)
@@ -18,7 +18,7 @@
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/cvmx-ipd-defs.h>
 #include <asm/octeon/cvmx-mio-defs.h>
-
+#include <asm/octeon/cvmx-rst-defs.h>
 
 static u64 f;
 static u64 rdiv;
@@ -39,11 +39,20 @@ void __init octeon_setup_delays(void)
 
        if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
                union cvmx_mio_rst_boot rst_boot;
+
                rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
                rdiv = rst_boot.s.c_mul;        /* CPU clock */
                sdiv = rst_boot.s.pnr_mul;      /* I/O clock */
                f = (0x8000000000000000ull / sdiv) * 2;
+       } else if (current_cpu_type() == CPU_CAVIUM_OCTEON3) {
+               union cvmx_rst_boot rst_boot;
+
+               rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
+               rdiv = rst_boot.s.c_mul;        /* CPU clock */
+               sdiv = rst_boot.s.pnr_mul;      /* I/O clock */
+               f = (0x8000000000000000ull / sdiv) * 2;
        }
+
 }
 
 /*
index 3778655c4a375215fddea8fd0969960d096d0d9c..7d8987818ccf51ed6aa82463fbe3de5dd599557e 100644 (file)
@@ -276,7 +276,7 @@ void __init plat_swiotlb_setup(void)
                        continue;
 
                /* These addresses map low for PCI. */
-               if (e->addr > 0x410000000ull && !OCTEON_IS_MODEL(OCTEON_CN6XXX))
+               if (e->addr > 0x410000000ull && !OCTEON_IS_OCTEON2())
                        continue;
 
                addr_size += e->size;
@@ -308,7 +308,7 @@ void __init plat_swiotlb_setup(void)
 #endif
 #ifdef CONFIG_USB_OCTEON_OHCI
        /* OCTEON II ohci is only 32-bit. */
-       if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && max_addr >= 0x100000000ul)
+       if (OCTEON_IS_OCTEON2() && max_addr >= 0x100000000ul)
                swiotlbsize = 64 * (1<<20);
 #endif
        swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
index 5dfef84b95767502a8cf9f5b4578b6f97a3bab76..9eb0feef441721362ee5e699e2788c1ffaf3b391 100644 (file)
@@ -767,7 +767,7 @@ enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(vo
                break;
        }
        /* Most boards except NIC10e use a 12MHz crystal */
-       if (OCTEON_IS_MODEL(OCTEON_FAM_2))
+       if (OCTEON_IS_OCTEON2())
                return USB_CLOCK_TYPE_CRYSTAL_12;
        return USB_CLOCK_TYPE_REF_48;
 }
index 2bc4aa95944e462d84673bb974e2dde119fb6bdf..10f762557b925d419de87351836f25db4004f04f 100644 (file)
@@ -3,12 +3,14 @@
  * License.  See the file "COPYING" in the main directory of this archive
  * for more details.
  *
- * Copyright (C) 2004-2012 Cavium, Inc.
+ * Copyright (C) 2004-2014 Cavium, Inc.
  */
 
+#include <linux/of_address.h>
 #include <linux/interrupt.h>
 #include <linux/irqdomain.h>
 #include <linux/bitops.h>
+#include <linux/of_irq.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <linux/irq.h>
@@ -22,16 +24,25 @@ static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
 static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
 static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
 
+struct octeon_irq_ciu_domain_data {
+       int num_sum;  /* number of sum registers (2 or 3). */
+};
+
 static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];
 
-union octeon_ciu_chip_data {
-       void *p;
-       unsigned long l;
-       struct {
-               unsigned long line:6;
-               unsigned long bit:6;
-               unsigned long gpio_line:6;
-       } s;
+struct octeon_ciu_chip_data {
+       union {
+               struct {                /* only used for ciu3 */
+                       u64 ciu3_addr;
+                       unsigned int intsn;
+               };
+               struct {                /* only used for ciu/ciu2 */
+                       u8 line;
+                       u8 bit;
+                       u8 gpio_line;
+               };
+       };
+       int current_cpu;        /* Next CPU expected to take this irq */
 };
 
 struct octeon_core_chip_data {
@@ -45,27 +56,40 @@ struct octeon_core_chip_data {
 
 static struct octeon_core_chip_data octeon_irq_core_chip_data[MIPS_CORE_IRQ_LINES];
 
-static void octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
-                                      struct irq_chip *chip,
-                                      irq_flow_handler_t handler)
+static int octeon_irq_set_ciu_mapping(int irq, int line, int bit, int gpio_line,
+                                     struct irq_chip *chip,
+                                     irq_flow_handler_t handler)
 {
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
+
+       cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+       if (!cd)
+               return -ENOMEM;
 
        irq_set_chip_and_handler(irq, chip, handler);
 
-       cd.l = 0;
-       cd.s.line = line;
-       cd.s.bit = bit;
-       cd.s.gpio_line = gpio_line;
+       cd->line = line;
+       cd->bit = bit;
+       cd->gpio_line = gpio_line;
 
-       irq_set_chip_data(irq, cd.p);
+       irq_set_chip_data(irq, cd);
        octeon_irq_ciu_to_irq[line][bit] = irq;
+       return 0;
 }
 
-static void octeon_irq_force_ciu_mapping(struct irq_domain *domain,
-                                        int irq, int line, int bit)
+static void octeon_irq_free_cd(struct irq_domain *d, unsigned int irq)
 {
-       irq_domain_associate(domain, irq, line << 6 | bit);
+       struct irq_data *data = irq_get_irq_data(irq);
+       struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+       irq_set_chip_data(irq, NULL);
+       kfree(cd);
+}
+
+static int octeon_irq_force_ciu_mapping(struct irq_domain *domain,
+                                       int irq, int line, int bit)
+{
+       return irq_domain_associate(domain, irq, line << 6 | bit);
 }
 
 static int octeon_coreid_for_cpu(int cpu)
@@ -202,9 +226,10 @@ static int next_cpu_for_irq(struct irq_data *data)
 #ifdef CONFIG_SMP
        int cpu;
        int weight = cpumask_weight(data->affinity);
+       struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
 
        if (weight > 1) {
-               cpu = smp_processor_id();
+               cpu = cd->current_cpu;
                for (;;) {
                        cpu = cpumask_next(cpu, data->affinity);
                        if (cpu >= nr_cpu_ids) {
@@ -219,6 +244,7 @@ static int next_cpu_for_irq(struct irq_data *data)
        } else {
                cpu = smp_processor_id();
        }
+       cd->current_cpu = cpu;
        return cpu;
 #else
        return smp_processor_id();
@@ -231,15 +257,15 @@ static void octeon_irq_ciu_enable(struct irq_data *data)
        int coreid = octeon_coreid_for_cpu(cpu);
        unsigned long *pen;
        unsigned long flags;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        raw_spin_lock_irqsave(lock, flags);
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
-               __set_bit(cd.s.bit, pen);
+               __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -248,7 +274,7 @@ static void octeon_irq_ciu_enable(struct irq_data *data)
                cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
        } else {
                pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
-               __set_bit(cd.s.bit, pen);
+               __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -263,15 +289,15 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
 {
        unsigned long *pen;
        unsigned long flags;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        raw_spin_lock_irqsave(lock, flags);
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
-               __set_bit(cd.s.bit, pen);
+               __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -280,7 +306,7 @@ static void octeon_irq_ciu_enable_local(struct irq_data *data)
                cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
        } else {
                pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
-               __set_bit(cd.s.bit, pen);
+               __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -295,15 +321,15 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
 {
        unsigned long *pen;
        unsigned long flags;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock = this_cpu_ptr(&octeon_irq_ciu_spinlock);
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        raw_spin_lock_irqsave(lock, flags);
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                pen = this_cpu_ptr(&octeon_irq_ciu0_en_mirror);
-               __clear_bit(cd.s.bit, pen);
+               __clear_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -312,7 +338,7 @@ static void octeon_irq_ciu_disable_local(struct irq_data *data)
                cvmx_write_csr(CVMX_CIU_INTX_EN0(cvmx_get_core_num() * 2), *pen);
        } else {
                pen = this_cpu_ptr(&octeon_irq_ciu1_en_mirror);
-               __clear_bit(cd.s.bit, pen);
+               __clear_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
@@ -328,27 +354,27 @@ static void octeon_irq_ciu_disable_all(struct irq_data *data)
        unsigned long flags;
        unsigned long *pen;
        int cpu;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock;
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                else
                        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
                raw_spin_lock_irqsave(lock, flags);
-               __clear_bit(cd.s.bit, pen);
+               __clear_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
                 */
                wmb();
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -361,27 +387,27 @@ static void octeon_irq_ciu_enable_all(struct irq_data *data)
        unsigned long flags;
        unsigned long *pen;
        int cpu;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        raw_spinlock_t *lock;
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        for_each_online_cpu(cpu) {
                int coreid = octeon_coreid_for_cpu(cpu);
                lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                else
                        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
                raw_spin_lock_irqsave(lock, flags);
-               __set_bit(cd.s.bit, pen);
+               __set_bit(cd->bit, pen);
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
                 * enabling the irq.
                 */
                wmb();
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -397,26 +423,87 @@ static void octeon_irq_ciu_enable_v2(struct irq_data *data)
 {
        u64 mask;
        int cpu = next_cpu_for_irq(data);
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
        /*
         * Called under the desc lock, so these should never get out
         * of sync.
         */
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                int index = octeon_coreid_for_cpu(cpu) * 2;
-               set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+               set_bit(cd->bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
        } else {
                int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-               set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+               set_bit(cd->bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
        }
 }
 
+/*
+ * Enable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_enable_sum2(struct irq_data *data)
+{
+       u64 mask;
+       int cpu = next_cpu_for_irq(data);
+       int index = octeon_coreid_for_cpu(cpu);
+       struct octeon_ciu_chip_data *cd;
+
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
+
+       cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
+}
+
+/*
+ * Disable the irq in the sum2 registers.
+ */
+static void octeon_irq_ciu_disable_local_sum2(struct irq_data *data)
+{
+       u64 mask;
+       int cpu = next_cpu_for_irq(data);
+       int index = octeon_coreid_for_cpu(cpu);
+       struct octeon_ciu_chip_data *cd;
+
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
+
+       cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
+}
+
+static void octeon_irq_ciu_ack_sum2(struct irq_data *data)
+{
+       u64 mask;
+       int cpu = next_cpu_for_irq(data);
+       int index = octeon_coreid_for_cpu(cpu);
+       struct octeon_ciu_chip_data *cd;
+
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
+
+       cvmx_write_csr(CVMX_CIU_SUM2_PPX_IP4(index), mask);
+}
+
+static void octeon_irq_ciu_disable_all_sum2(struct irq_data *data)
+{
+       int cpu;
+       struct octeon_ciu_chip_data *cd;
+       u64 mask;
+
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
+
+       for_each_online_cpu(cpu) {
+               int coreid = octeon_coreid_for_cpu(cpu);
+
+               cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(coreid), mask);
+       }
+}
+
 /*
  * Enable the irq on the current CPU for chips that
  * have the EN*_W1{S,C} registers.
@@ -424,18 +511,18 @@ static void octeon_irq_ciu_enable_v2(struct irq_data *data)
 static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
 {
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                int index = cvmx_get_core_num() * 2;
-               set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+               set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
        } else {
                int index = cvmx_get_core_num() * 2 + 1;
-               set_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+               set_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
        }
 }
@@ -443,18 +530,18 @@ static void octeon_irq_ciu_enable_local_v2(struct irq_data *data)
 static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
 {
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                int index = cvmx_get_core_num() * 2;
-               clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
+               clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu0_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
        } else {
                int index = cvmx_get_core_num() * 2 + 1;
-               clear_bit(cd.s.bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
+               clear_bit(cd->bit, this_cpu_ptr(&octeon_irq_ciu1_en_mirror));
                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
        }
 }
@@ -465,12 +552,12 @@ static void octeon_irq_ciu_disable_local_v2(struct irq_data *data)
 static void octeon_irq_ciu_ack(struct irq_data *data)
 {
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                int index = cvmx_get_core_num() * 2;
                cvmx_write_csr(CVMX_CIU_INTX_SUM0(index), mask);
        } else {
@@ -486,21 +573,23 @@ static void octeon_irq_ciu_disable_all_v2(struct irq_data *data)
 {
        int cpu;
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2;
-                       clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+                       clear_bit(cd->bit,
+                               &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
                }
        } else {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-                       clear_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+                       clear_bit(cd->bit,
+                               &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
                }
        }
@@ -514,21 +603,23 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
 {
        int cpu;
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2;
-                       set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
+                       set_bit(cd->bit,
+                               &per_cpu(octeon_irq_ciu0_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                }
        } else {
                for_each_online_cpu(cpu) {
                        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
-                       set_bit(cd.s.bit, &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
+                       set_bit(cd->bit,
+                               &per_cpu(octeon_irq_ciu1_en_mirror, cpu));
                        cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                }
        }
@@ -537,10 +628,10 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
 static void octeon_irq_gpio_setup(struct irq_data *data)
 {
        union cvmx_gpio_bit_cfgx cfg;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        u32 t = irqd_get_trigger_type(data);
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        cfg.u64 = 0;
        cfg.s.int_en = 1;
@@ -551,7 +642,7 @@ static void octeon_irq_gpio_setup(struct irq_data *data)
        cfg.s.fil_cnt = 7;
        cfg.s.fil_sel = 3;
 
-       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), cfg.u64);
+       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), cfg.u64);
 }
 
 static void octeon_irq_ciu_enable_gpio_v2(struct irq_data *data)
@@ -576,36 +667,36 @@ static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
 
 static void octeon_irq_ciu_disable_gpio_v2(struct irq_data *data)
 {
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+       cd = irq_data_get_irq_chip_data(data);
+       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
        octeon_irq_ciu_disable_all_v2(data);
 }
 
 static void octeon_irq_ciu_disable_gpio(struct irq_data *data)
 {
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+       cd = irq_data_get_irq_chip_data(data);
+       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
        octeon_irq_ciu_disable_all(data);
 }
 
 static void octeon_irq_ciu_gpio_ack(struct irq_data *data)
 {
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        u64 mask;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.gpio_line);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->gpio_line);
 
        cvmx_write_csr(CVMX_GPIO_INT_CLR, mask);
 }
 
-static void octeon_irq_handle_gpio(unsigned int irq, struct irq_desc *desc)
+static void octeon_irq_handle_trigger(unsigned int irq, struct irq_desc *desc)
 {
        if (irq_get_trigger_type(irq) & IRQ_TYPE_EDGE_BOTH)
                handle_edge_irq(irq, desc);
@@ -644,11 +735,11 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
        int cpu;
        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
        unsigned long flags;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
        unsigned long *pen;
        raw_spinlock_t *lock;
 
-       cd.p = irq_data_get_irq_chip_data(data);
+       cd = irq_data_get_irq_chip_data(data);
 
        /*
         * For non-v2 CIU, we will allow only single CPU affinity.
@@ -668,16 +759,16 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
                lock = &per_cpu(octeon_irq_ciu_spinlock, cpu);
                raw_spin_lock_irqsave(lock, flags);
 
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                else
                        pen = &per_cpu(octeon_irq_ciu1_en_mirror, cpu);
 
                if (cpumask_test_cpu(cpu, dest) && enable_one) {
                        enable_one = 0;
-                       __set_bit(cd.s.bit, pen);
+                       __set_bit(cd->bit, pen);
                } else {
-                       __clear_bit(cd.s.bit, pen);
+                       __clear_bit(cd->bit, pen);
                }
                /*
                 * Must be visible to octeon_irq_ip{2,3}_ciu() before
@@ -685,7 +776,7 @@ static int octeon_irq_ciu_set_affinity(struct irq_data *data,
                 */
                wmb();
 
-               if (cd.s.line == 0)
+               if (cd->line == 0)
                        cvmx_write_csr(CVMX_CIU_INTX_EN0(coreid * 2), *pen);
                else
                        cvmx_write_csr(CVMX_CIU_INTX_EN1(coreid * 2 + 1), *pen);
@@ -706,24 +797,24 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
        int cpu;
        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
        if (!enable_one)
                return 0;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << cd.s.bit;
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << cd->bit;
 
-       if (cd.s.line == 0) {
+       if (cd->line == 0) {
                for_each_online_cpu(cpu) {
                        unsigned long *pen = &per_cpu(octeon_irq_ciu0_en_mirror, cpu);
                        int index = octeon_coreid_for_cpu(cpu) * 2;
                        if (cpumask_test_cpu(cpu, dest) && enable_one) {
                                enable_one = false;
-                               set_bit(cd.s.bit, pen);
+                               set_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1S(index), mask);
                        } else {
-                               clear_bit(cd.s.bit, pen);
+                               clear_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN0_W1C(index), mask);
                        }
                }
@@ -733,22 +824,62 @@ static int octeon_irq_ciu_set_affinity_v2(struct irq_data *data,
                        int index = octeon_coreid_for_cpu(cpu) * 2 + 1;
                        if (cpumask_test_cpu(cpu, dest) && enable_one) {
                                enable_one = false;
-                               set_bit(cd.s.bit, pen);
+                               set_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1S(index), mask);
                        } else {
-                               clear_bit(cd.s.bit, pen);
+                               clear_bit(cd->bit, pen);
                                cvmx_write_csr(CVMX_CIU_INTX_EN1_W1C(index), mask);
                        }
                }
        }
        return 0;
 }
+
+static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
+                                           const struct cpumask *dest,
+                                           bool force)
+{
+       int cpu;
+       bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+       u64 mask;
+       struct octeon_ciu_chip_data *cd;
+
+       if (!enable_one)
+               return 0;
+
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << cd->bit;
+
+       for_each_online_cpu(cpu) {
+               int index = octeon_coreid_for_cpu(cpu);
+
+               if (cpumask_test_cpu(cpu, dest) && enable_one) {
+                       enable_one = false;
+                       cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1S(index), mask);
+               } else {
+                       cvmx_write_csr(CVMX_CIU_EN2_PPX_IP4_W1C(index), mask);
+               }
+       }
+       return 0;
+}
 #endif
 
 /*
  * Newer octeon chips have support for lockless CIU operation.
  */
 static struct irq_chip octeon_irq_chip_ciu_v2 = {
+       .name = "CIU",
+       .irq_enable = octeon_irq_ciu_enable_v2,
+       .irq_disable = octeon_irq_ciu_disable_all_v2,
+       .irq_mask = octeon_irq_ciu_disable_local_v2,
+       .irq_unmask = octeon_irq_ciu_enable_v2,
+#ifdef CONFIG_SMP
+       .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
+       .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_v2_edge = {
        .name = "CIU",
        .irq_enable = octeon_irq_ciu_enable_v2,
        .irq_disable = octeon_irq_ciu_disable_all_v2,
@@ -761,7 +892,47 @@ static struct irq_chip octeon_irq_chip_ciu_v2 = {
 #endif
 };
 
+/*
+ * Newer octeon chips have support for lockless CIU operation.
+ */
+static struct irq_chip octeon_irq_chip_ciu_sum2 = {
+       .name = "CIU",
+       .irq_enable = octeon_irq_ciu_enable_sum2,
+       .irq_disable = octeon_irq_ciu_disable_all_sum2,
+       .irq_mask = octeon_irq_ciu_disable_local_sum2,
+       .irq_unmask = octeon_irq_ciu_enable_sum2,
+#ifdef CONFIG_SMP
+       .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
+       .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_sum2_edge = {
+       .name = "CIU",
+       .irq_enable = octeon_irq_ciu_enable_sum2,
+       .irq_disable = octeon_irq_ciu_disable_all_sum2,
+       .irq_ack = octeon_irq_ciu_ack_sum2,
+       .irq_mask = octeon_irq_ciu_disable_local_sum2,
+       .irq_unmask = octeon_irq_ciu_enable_sum2,
+#ifdef CONFIG_SMP
+       .irq_set_affinity = octeon_irq_ciu_set_affinity_sum2,
+       .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
 static struct irq_chip octeon_irq_chip_ciu = {
+       .name = "CIU",
+       .irq_enable = octeon_irq_ciu_enable,
+       .irq_disable = octeon_irq_ciu_disable_all,
+       .irq_mask = octeon_irq_ciu_disable_local,
+       .irq_unmask = octeon_irq_ciu_enable,
+#ifdef CONFIG_SMP
+       .irq_set_affinity = octeon_irq_ciu_set_affinity,
+       .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu_edge = {
        .name = "CIU",
        .irq_enable = octeon_irq_ciu_enable,
        .irq_disable = octeon_irq_ciu_disable_all,
@@ -970,11 +1141,12 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d,
                               unsigned int *out_type)
 {
        unsigned int ciu, bit;
+       struct octeon_irq_ciu_domain_data *dd = d->host_data;
 
        ciu = intspec[0];
        bit = intspec[1];
 
-       if (ciu > 1 || bit > 63)
+       if (ciu >= dd->num_sum || bit > 63)
                return -EINVAL;
 
        *out_hwirq = (ciu << 6) | bit;
@@ -984,6 +1156,7 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d,
 }
 
 static struct irq_chip *octeon_irq_ciu_chip;
+static struct irq_chip *octeon_irq_ciu_chip_edge;
 static struct irq_chip *octeon_irq_gpio_chip;
 
 static bool octeon_irq_virq_in_range(unsigned int virq)
@@ -999,8 +1172,10 @@ static bool octeon_irq_virq_in_range(unsigned int virq)
 static int octeon_irq_ciu_map(struct irq_domain *d,
                              unsigned int virq, irq_hw_number_t hw)
 {
+       int rv;
        unsigned int line = hw >> 6;
        unsigned int bit = hw & 63;
+       struct octeon_irq_ciu_domain_data *dd = d->host_data;
 
        if (!octeon_irq_virq_in_range(virq))
                return -EINVAL;
@@ -1009,54 +1184,61 @@ static int octeon_irq_ciu_map(struct irq_domain *d,
        if (line == 0 && bit >= 16 && bit <32)
                return 0;
 
-       if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
+       if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
                return -EINVAL;
 
-       if (octeon_irq_ciu_is_edge(line, bit))
-               octeon_irq_set_ciu_mapping(virq, line, bit, 0,
-                                          octeon_irq_ciu_chip,
-                                          handle_edge_irq);
-       else
-               octeon_irq_set_ciu_mapping(virq, line, bit, 0,
-                                          octeon_irq_ciu_chip,
-                                          handle_level_irq);
-
-       return 0;
+       if (line == 2) {
+               if (octeon_irq_ciu_is_edge(line, bit))
+                       rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+                               &octeon_irq_chip_ciu_sum2_edge,
+                               handle_edge_irq);
+               else
+                       rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+                               &octeon_irq_chip_ciu_sum2,
+                               handle_level_irq);
+       } else {
+               if (octeon_irq_ciu_is_edge(line, bit))
+                       rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+                               octeon_irq_ciu_chip_edge,
+                               handle_edge_irq);
+               else
+                       rv = octeon_irq_set_ciu_mapping(virq, line, bit, 0,
+                               octeon_irq_ciu_chip,
+                               handle_level_irq);
+       }
+       return rv;
 }
 
-static int octeon_irq_gpio_map_common(struct irq_domain *d,
-                                     unsigned int virq, irq_hw_number_t hw,
-                                     int line_limit, struct irq_chip *chip)
+static int octeon_irq_gpio_map(struct irq_domain *d,
+                              unsigned int virq, irq_hw_number_t hw)
 {
        struct octeon_irq_gpio_domain_data *gpiod = d->host_data;
        unsigned int line, bit;
+       int r;
 
        if (!octeon_irq_virq_in_range(virq))
                return -EINVAL;
 
        line = (hw + gpiod->base_hwirq) >> 6;
        bit = (hw + gpiod->base_hwirq) & 63;
-       if (line > line_limit || octeon_irq_ciu_to_irq[line][bit] != 0)
+       if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
+               octeon_irq_ciu_to_irq[line][bit] != 0)
                return -EINVAL;
 
-       octeon_irq_set_ciu_mapping(virq, line, bit, hw,
-                                  chip, octeon_irq_handle_gpio);
-       return 0;
-}
-
-static int octeon_irq_gpio_map(struct irq_domain *d,
-                              unsigned int virq, irq_hw_number_t hw)
-{
-       return octeon_irq_gpio_map_common(d, virq, hw, 1, octeon_irq_gpio_chip);
+       r = octeon_irq_set_ciu_mapping(virq, line, bit, hw,
+               octeon_irq_gpio_chip, octeon_irq_handle_trigger);
+       return r;
 }
 
 static struct irq_domain_ops octeon_irq_domain_ciu_ops = {
        .map = octeon_irq_ciu_map,
+       .unmap = octeon_irq_free_cd,
        .xlate = octeon_irq_ciu_xlat,
 };
 
 static struct irq_domain_ops octeon_irq_domain_gpio_ops = {
        .map = octeon_irq_gpio_map,
+       .unmap = octeon_irq_free_cd,
        .xlate = octeon_irq_gpio_xlat,
 };
 
@@ -1095,6 +1277,26 @@ static void octeon_irq_ip3_ciu(void)
        }
 }
 
+static void octeon_irq_ip4_ciu(void)
+{
+       int coreid = cvmx_get_core_num();
+       u64 ciu_sum = cvmx_read_csr(CVMX_CIU_SUM2_PPX_IP4(coreid));
+       u64 ciu_en = cvmx_read_csr(CVMX_CIU_EN2_PPX_IP4(coreid));
+
+       ciu_sum &= ciu_en;
+       if (likely(ciu_sum)) {
+               int bit = fls64(ciu_sum) - 1;
+               int irq = octeon_irq_ciu_to_irq[2][bit];
+
+               if (likely(irq))
+                       do_IRQ(irq);
+               else
+                       spurious_interrupt();
+       } else {
+               spurious_interrupt();
+       }
+}
+
 static bool octeon_irq_use_ip4;
 
 static void octeon_irq_local_enable_ip4(void *arg)
@@ -1176,7 +1378,10 @@ static void octeon_irq_setup_secondary_ciu(void)
 
        /* Enable the CIU lines */
        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
-       clear_c0_status(STATUSF_IP4);
+       if (octeon_irq_use_ip4)
+               set_c0_status(STATUSF_IP4);
+       else
+               clear_c0_status(STATUSF_IP4);
 }
 
 static void octeon_irq_setup_secondary_ciu2(void)
@@ -1192,95 +1397,194 @@ static void octeon_irq_setup_secondary_ciu2(void)
                clear_c0_status(STATUSF_IP4);
 }
 
-static void __init octeon_irq_init_ciu(void)
+static int __init octeon_irq_init_ciu(
+       struct device_node *ciu_node, struct device_node *parent)
 {
-       unsigned int i;
+       unsigned int i, r;
        struct irq_chip *chip;
+       struct irq_chip *chip_edge;
        struct irq_chip *chip_mbox;
        struct irq_chip *chip_wd;
-       struct device_node *gpio_node;
-       struct device_node *ciu_node;
        struct irq_domain *ciu_domain = NULL;
+       struct octeon_irq_ciu_domain_data *dd;
+
+       dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+       if (!dd)
+               return -ENOMEM;
 
        octeon_irq_init_ciu_percpu();
        octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu;
 
        octeon_irq_ip2 = octeon_irq_ip2_ciu;
        octeon_irq_ip3 = octeon_irq_ip3_ciu;
+       if ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
+               && !OCTEON_IS_MODEL(OCTEON_CN63XX)) {
+               octeon_irq_ip4 =  octeon_irq_ip4_ciu;
+               dd->num_sum = 3;
+               octeon_irq_use_ip4 = true;
+       } else {
+               octeon_irq_ip4 = octeon_irq_ip4_mask;
+               dd->num_sum = 2;
+               octeon_irq_use_ip4 = false;
+       }
        if (OCTEON_IS_MODEL(OCTEON_CN58XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN56XX_PASS2_X) ||
            OCTEON_IS_MODEL(OCTEON_CN52XX_PASS2_X) ||
-           OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+           OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) {
                chip = &octeon_irq_chip_ciu_v2;
+               chip_edge = &octeon_irq_chip_ciu_v2_edge;
                chip_mbox = &octeon_irq_chip_ciu_mbox_v2;
                chip_wd = &octeon_irq_chip_ciu_wd_v2;
                octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio_v2;
        } else {
                chip = &octeon_irq_chip_ciu;
+               chip_edge = &octeon_irq_chip_ciu_edge;
                chip_mbox = &octeon_irq_chip_ciu_mbox;
                chip_wd = &octeon_irq_chip_ciu_wd;
                octeon_irq_gpio_chip = &octeon_irq_chip_ciu_gpio;
        }
        octeon_irq_ciu_chip = chip;
-       octeon_irq_ip4 = octeon_irq_ip4_mask;
+       octeon_irq_ciu_chip_edge = chip_edge;
 
        /* Mips internal */
        octeon_irq_init_core();
 
-       gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
-       if (gpio_node) {
-               struct octeon_irq_gpio_domain_data *gpiod;
-
-               gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
-               if (gpiod) {
-                       /* gpio domain host_data is the base hwirq number. */
-                       gpiod->base_hwirq = 16;
-                       irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
-                       of_node_put(gpio_node);
-               } else
-                       pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
-       } else
-               pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");
-
-       ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-ciu");
-       if (ciu_node) {
-               ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu_ops, NULL);
-               irq_set_default_host(ciu_domain);
-               of_node_put(ciu_node);
-       } else
-               panic("Cannot find device node for cavium,octeon-3860-ciu.");
+       ciu_domain = irq_domain_add_tree(
+               ciu_node, &octeon_irq_domain_ciu_ops, dd);
+       irq_set_default_host(ciu_domain);
 
        /* CIU_0 */
-       for (i = 0; i < 16; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
+       for (i = 0; i < 16; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i + 0);
+               if (r)
+                       goto err;
+       }
+
+       r = octeon_irq_set_ciu_mapping(
+               OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
+       if (r)
+               goto err;
+       r = octeon_irq_set_ciu_mapping(
+               OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
+       if (r)
+               goto err;
+
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
+               if (r)
+                       goto err;
+       }
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+               if (r)
+                       goto err;
+       }
 
-       octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX0, 0, 32, 0, chip_mbox, handle_percpu_irq);
-       octeon_irq_set_ciu_mapping(OCTEON_IRQ_MBOX1, 0, 33, 0, chip_mbox, handle_percpu_irq);
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
+       if (r)
+               goto err;
 
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 0, i + 36);
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 0, i + 40);
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
+       if (r)
+               goto err;
 
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI, 0, 45);
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_RML, 0, 46);
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_TIMER0, 0, i + 52);
+               if (r)
+                       goto err;
+       }
+
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
+       if (r)
+               goto err;
 
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
+       if (r)
+               goto err;
 
        /* CIU_1 */
-       for (i = 0; i < 16; i++)
-               octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd, handle_level_irq);
+       for (i = 0; i < 16; i++) {
+               r = octeon_irq_set_ciu_mapping(
+                       i + OCTEON_IRQ_WDOG0, 1, i + 0, 0, chip_wd,
+                       handle_level_irq);
+               if (r)
+                       goto err;
+       }
 
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
+       if (r)
+               goto err;
 
        /* Enable the CIU lines */
        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
-       clear_c0_status(STATUSF_IP4);
+       if (octeon_irq_use_ip4)
+               set_c0_status(STATUSF_IP4);
+       else
+               clear_c0_status(STATUSF_IP4);
+
+       return 0;
+err:
+       return r;
 }
 
+static int __init octeon_irq_init_gpio(
+       struct device_node *gpio_node, struct device_node *parent)
+{
+       struct octeon_irq_gpio_domain_data *gpiod;
+       u32 interrupt_cells;
+       unsigned int base_hwirq;
+       int r;
+
+       r = of_property_read_u32(parent, "#interrupt-cells", &interrupt_cells);
+       if (r)
+               return r;
+
+       if (interrupt_cells == 1) {
+               u32 v;
+
+               r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v);
+               if (r) {
+                       pr_warn("No \"interrupts\" property.\n");
+                       return r;
+               }
+               base_hwirq = v;
+       } else if (interrupt_cells == 2) {
+               u32 v0, v1;
+
+               r = of_property_read_u32_index(gpio_node, "interrupts", 0, &v0);
+               if (r) {
+                       pr_warn("No \"interrupts\" property.\n");
+                       return r;
+               }
+               r = of_property_read_u32_index(gpio_node, "interrupts", 1, &v1);
+               if (r) {
+                       pr_warn("No \"interrupts\" property.\n");
+                       return r;
+               }
+               base_hwirq = (v0 << 6) | v1;
+       } else {
+               pr_warn("Bad \"#interrupt-cells\" property: %u\n",
+                       interrupt_cells);
+               return -EINVAL;
+       }
+
+       gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
+       if (gpiod) {
+               /* gpio domain host_data is the base hwirq number. */
+               gpiod->base_hwirq = base_hwirq;
+               irq_domain_add_linear(
+                       gpio_node, 16, &octeon_irq_domain_gpio_ops, gpiod);
+       } else {
+               pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
+               return -ENOMEM;
+       }
+
+       return 0;
+}
 /*
  * Watchdog interrupts are special.  They are associated with a single
  * core, so we hardwire the affinity to that core.
@@ -1290,12 +1594,13 @@ static void octeon_irq_ciu2_wd_enable(struct irq_data *data)
        u64 mask;
        u64 en_addr;
        int coreid = data->irq - OCTEON_IRQ_WDOG0;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
+       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+               (0x1000ull * cd->line);
        cvmx_write_csr(en_addr, mask);
 
 }
@@ -1306,12 +1611,13 @@ static void octeon_irq_ciu2_enable(struct irq_data *data)
        u64 en_addr;
        int cpu = next_cpu_for_irq(data);
        int coreid = octeon_coreid_for_cpu(cpu);
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
+       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+               (0x1000ull * cd->line);
        cvmx_write_csr(en_addr, mask);
 }
 
@@ -1320,12 +1626,13 @@ static void octeon_irq_ciu2_enable_local(struct irq_data *data)
        u64 mask;
        u64 en_addr;
        int coreid = cvmx_get_core_num();
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) + (0x1000ull * cd.s.line);
+       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(coreid) +
+               (0x1000ull * cd->line);
        cvmx_write_csr(en_addr, mask);
 
 }
@@ -1335,12 +1642,13 @@ static void octeon_irq_ciu2_disable_local(struct irq_data *data)
        u64 mask;
        u64 en_addr;
        int coreid = cvmx_get_core_num();
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) + (0x1000ull * cd.s.line);
+       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(coreid) +
+               (0x1000ull * cd->line);
        cvmx_write_csr(en_addr, mask);
 
 }
@@ -1350,12 +1658,12 @@ static void octeon_irq_ciu2_ack(struct irq_data *data)
        u64 mask;
        u64 en_addr;
        int coreid = cvmx_get_core_num();
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
-       en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd.s.line);
+       en_addr = CVMX_CIU2_RAW_PPX_IP2_WRKQ(coreid) + (0x1000ull * cd->line);
        cvmx_write_csr(en_addr, mask);
 
 }
@@ -1364,13 +1672,14 @@ static void octeon_irq_ciu2_disable_all(struct irq_data *data)
 {
        int cpu;
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << (cd.s.bit);
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << (cd->bit);
 
        for_each_online_cpu(cpu) {
-               u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
+               u64 en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
+                       octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd->line);
                cvmx_write_csr(en_addr, mask);
        }
 }
@@ -1383,7 +1692,8 @@ static void octeon_irq_ciu2_mbox_enable_all(struct irq_data *data)
        mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
 
        for_each_online_cpu(cpu) {
-               u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(octeon_coreid_for_cpu(cpu));
+               u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1S(
+                       octeon_coreid_for_cpu(cpu));
                cvmx_write_csr(en_addr, mask);
        }
 }
@@ -1396,7 +1706,8 @@ static void octeon_irq_ciu2_mbox_disable_all(struct irq_data *data)
        mask = 1ull << (data->irq - OCTEON_IRQ_MBOX0);
 
        for_each_online_cpu(cpu) {
-               u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(octeon_coreid_for_cpu(cpu));
+               u64 en_addr = CVMX_CIU2_EN_PPX_IP3_MBOX_W1C(
+                       octeon_coreid_for_cpu(cpu));
                cvmx_write_csr(en_addr, mask);
        }
 }
@@ -1430,21 +1741,25 @@ static int octeon_irq_ciu2_set_affinity(struct irq_data *data,
        int cpu;
        bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
        u64 mask;
-       union octeon_ciu_chip_data cd;
+       struct octeon_ciu_chip_data *cd;
 
        if (!enable_one)
                return 0;
 
-       cd.p = irq_data_get_irq_chip_data(data);
-       mask = 1ull << cd.s.bit;
+       cd = irq_data_get_irq_chip_data(data);
+       mask = 1ull << cd->bit;
 
        for_each_online_cpu(cpu) {
                u64 en_addr;
                if (cpumask_test_cpu(cpu, dest) && enable_one) {
                        enable_one = false;
-                       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
+                       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1S(
+                               octeon_coreid_for_cpu(cpu)) +
+                               (0x1000ull * cd->line);
                } else {
-                       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(octeon_coreid_for_cpu(cpu)) + (0x1000ull * cd.s.line);
+                       en_addr = CVMX_CIU2_EN_PPX_IP2_WRKQ_W1C(
+                               octeon_coreid_for_cpu(cpu)) +
+                               (0x1000ull * cd->line);
                }
                cvmx_write_csr(en_addr, mask);
        }
@@ -1461,15 +1776,28 @@ static void octeon_irq_ciu2_enable_gpio(struct irq_data *data)
 
 static void octeon_irq_ciu2_disable_gpio(struct irq_data *data)
 {
-       union octeon_ciu_chip_data cd;
-       cd.p = irq_data_get_irq_chip_data(data);
+       struct octeon_ciu_chip_data *cd;
+
+       cd = irq_data_get_irq_chip_data(data);
 
-       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd.s.gpio_line), 0);
+       cvmx_write_csr(CVMX_GPIO_BIT_CFGX(cd->gpio_line), 0);
 
        octeon_irq_ciu2_disable_all(data);
 }
 
 static struct irq_chip octeon_irq_chip_ciu2 = {
+       .name = "CIU2-E",
+       .irq_enable = octeon_irq_ciu2_enable,
+       .irq_disable = octeon_irq_ciu2_disable_all,
+       .irq_mask = octeon_irq_ciu2_disable_local,
+       .irq_unmask = octeon_irq_ciu2_enable,
+#ifdef CONFIG_SMP
+       .irq_set_affinity = octeon_irq_ciu2_set_affinity,
+       .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+static struct irq_chip octeon_irq_chip_ciu2_edge = {
        .name = "CIU2-E",
        .irq_enable = octeon_irq_ciu2_enable,
        .irq_disable = octeon_irq_ciu2_disable_all,
@@ -1582,7 +1910,7 @@ static int octeon_irq_ciu2_map(struct irq_domain *d,
 
        if (octeon_irq_ciu2_is_edge(line, bit))
                octeon_irq_set_ciu_mapping(virq, line, bit, 0,
-                                          &octeon_irq_chip_ciu2,
+                                          &octeon_irq_chip_ciu2_edge,
                                           handle_edge_irq);
        else
                octeon_irq_set_ciu_mapping(virq, line, bit, 0,
@@ -1591,22 +1919,13 @@ static int octeon_irq_ciu2_map(struct irq_domain *d,
 
        return 0;
 }
-static int octeon_irq_ciu2_gpio_map(struct irq_domain *d,
-                                   unsigned int virq, irq_hw_number_t hw)
-{
-       return octeon_irq_gpio_map_common(d, virq, hw, 7, &octeon_irq_chip_ciu2_gpio);
-}
 
 static struct irq_domain_ops octeon_irq_domain_ciu2_ops = {
        .map = octeon_irq_ciu2_map,
+       .unmap = octeon_irq_free_cd,
        .xlate = octeon_irq_ciu2_xlat,
 };
 
-static struct irq_domain_ops octeon_irq_domain_ciu2_gpio_ops = {
-       .map = octeon_irq_ciu2_gpio_map,
-       .xlate = octeon_irq_gpio_xlat,
-};
-
 static void octeon_irq_ciu2(void)
 {
        int line;
@@ -1674,16 +1993,16 @@ out:
        return;
 }
 
-static void __init octeon_irq_init_ciu2(void)
+static int __init octeon_irq_init_ciu2(
+       struct device_node *ciu_node, struct device_node *parent)
 {
-       unsigned int i;
-       struct device_node *gpio_node;
-       struct device_node *ciu_node;
+       unsigned int i, r;
        struct irq_domain *ciu_domain = NULL;
 
        octeon_irq_init_ciu2_percpu();
        octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu2;
 
+       octeon_irq_gpio_chip = &octeon_irq_chip_ciu2_gpio;
        octeon_irq_ip2 = octeon_irq_ciu2;
        octeon_irq_ip3 = octeon_irq_ciu2_mbox;
        octeon_irq_ip4 = octeon_irq_ip4_mask;
@@ -1691,47 +2010,49 @@ static void __init octeon_irq_init_ciu2(void)
        /* Mips internal */
        octeon_irq_init_core();
 
-       gpio_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-3860-gpio");
-       if (gpio_node) {
-               struct octeon_irq_gpio_domain_data *gpiod;
-
-               gpiod = kzalloc(sizeof(*gpiod), GFP_KERNEL);
-               if (gpiod) {
-                       /* gpio domain host_data is the base hwirq number. */
-                       gpiod->base_hwirq = 7 << 6;
-                       irq_domain_add_linear(gpio_node, 16, &octeon_irq_domain_ciu2_gpio_ops, gpiod);
-                       of_node_put(gpio_node);
-               } else
-                       pr_warn("Cannot allocate memory for GPIO irq_domain.\n");
-       } else
-               pr_warn("Cannot find device node for cavium,octeon-3860-gpio.\n");
-
-       ciu_node = of_find_compatible_node(NULL, NULL, "cavium,octeon-6880-ciu2");
-       if (ciu_node) {
-               ciu_domain = irq_domain_add_tree(ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
-               irq_set_default_host(ciu_domain);
-               of_node_put(ciu_node);
-       } else
-               panic("Cannot find device node for cavium,octeon-6880-ciu2.");
+       ciu_domain = irq_domain_add_tree(
+               ciu_node, &octeon_irq_domain_ciu2_ops, NULL);
+       irq_set_default_host(ciu_domain);
 
        /* CIU2 */
-       for (i = 0; i < 64; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
+       for (i = 0; i < 64; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_WORKQ0, 0, i);
+               if (r)
+                       goto err;
+       }
 
-       for (i = 0; i < 32; i++)
-               octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
-                                          &octeon_irq_chip_ciu2_wd, handle_level_irq);
+       for (i = 0; i < 32; i++) {
+               r = octeon_irq_set_ciu_mapping(i + OCTEON_IRQ_WDOG0, 1, i, 0,
+                       &octeon_irq_chip_ciu2_wd, handle_level_irq);
+               if (r)
+                       goto err;
+       }
 
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_TIMER0, 3, i + 8);
+               if (r)
+                       goto err;
+       }
 
-       octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
+       r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
+       if (r)
+               goto err;
 
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
+               if (r)
+                       goto err;
+       }
 
-       for (i = 0; i < 4; i++)
-               octeon_irq_force_ciu_mapping(ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
+       for (i = 0; i < 4; i++) {
+               r = octeon_irq_force_ciu_mapping(
+                       ciu_domain, i + OCTEON_IRQ_PCI_MSI0, 4, i + 8);
+               if (r)
+                       goto err;
+       }
 
        irq_set_chip_and_handler(OCTEON_IRQ_MBOX0, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
        irq_set_chip_and_handler(OCTEON_IRQ_MBOX1, &octeon_irq_chip_ciu2_mbox, handle_percpu_irq);
@@ -1741,8 +2062,242 @@ static void __init octeon_irq_init_ciu2(void)
        /* Enable the CIU lines */
        set_c0_status(STATUSF_IP3 | STATUSF_IP2);
        clear_c0_status(STATUSF_IP4);
+       return 0;
+err:
+       return r;
+}
+
+struct octeon_irq_cib_host_data {
+       raw_spinlock_t lock;
+       u64 raw_reg;
+       u64 en_reg;
+       int max_bits;
+};
+
+struct octeon_irq_cib_chip_data {
+       struct octeon_irq_cib_host_data *host_data;
+       int bit;
+};
+
+static void octeon_irq_cib_enable(struct irq_data *data)
+{
+       unsigned long flags;
+       u64 en;
+       struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
+       struct octeon_irq_cib_host_data *host_data = cd->host_data;
+
+       raw_spin_lock_irqsave(&host_data->lock, flags);
+       en = cvmx_read_csr(host_data->en_reg);
+       en |= 1ull << cd->bit;
+       cvmx_write_csr(host_data->en_reg, en);
+       raw_spin_unlock_irqrestore(&host_data->lock, flags);
+}
+
+static void octeon_irq_cib_disable(struct irq_data *data)
+{
+       unsigned long flags;
+       u64 en;
+       struct octeon_irq_cib_chip_data *cd = irq_data_get_irq_chip_data(data);
+       struct octeon_irq_cib_host_data *host_data = cd->host_data;
+
+       raw_spin_lock_irqsave(&host_data->lock, flags);
+       en = cvmx_read_csr(host_data->en_reg);
+       en &= ~(1ull << cd->bit);
+       cvmx_write_csr(host_data->en_reg, en);
+       raw_spin_unlock_irqrestore(&host_data->lock, flags);
+}
+
+static int octeon_irq_cib_set_type(struct irq_data *data, unsigned int t)
+{
+       irqd_set_trigger_type(data, t);
+       return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip octeon_irq_chip_cib = {
+       .name = "CIB",
+       .irq_enable = octeon_irq_cib_enable,
+       .irq_disable = octeon_irq_cib_disable,
+       .irq_mask = octeon_irq_cib_disable,
+       .irq_unmask = octeon_irq_cib_enable,
+       .irq_set_type = octeon_irq_cib_set_type,
+};
+
+static int octeon_irq_cib_xlat(struct irq_domain *d,
+                                  struct device_node *node,
+                                  const u32 *intspec,
+                                  unsigned int intsize,
+                                  unsigned long *out_hwirq,
+                                  unsigned int *out_type)
+{
+       unsigned int type = 0;
+
+       if (intsize == 2)
+               type = intspec[1];
+
+       switch (type) {
+       case 0: /* unofficial value, but we might as well let it work. */
+       case 4: /* official value for level triggering. */
+               *out_type = IRQ_TYPE_LEVEL_HIGH;
+               break;
+       case 1: /* official value for edge triggering. */
+               *out_type = IRQ_TYPE_EDGE_RISING;
+               break;
+       default: /* Nothing else is acceptable. */
+               return -EINVAL;
+       }
+
+       *out_hwirq = intspec[0];
+
+       return 0;
+}
+
+static int octeon_irq_cib_map(struct irq_domain *d,
+                             unsigned int virq, irq_hw_number_t hw)
+{
+       struct octeon_irq_cib_host_data *host_data = d->host_data;
+       struct octeon_irq_cib_chip_data *cd;
+
+       if (hw >= host_data->max_bits) {
+               pr_err("ERROR: %s mapping %u is to big!\n",
+                      d->of_node->name, (unsigned)hw);
+               return -EINVAL;
+       }
+
+       cd = kzalloc(sizeof(*cd), GFP_KERNEL);
+       cd->host_data = host_data;
+       cd->bit = hw;
+
+       irq_set_chip_and_handler(virq, &octeon_irq_chip_cib,
+                                handle_simple_irq);
+       irq_set_chip_data(virq, cd);
+       return 0;
 }
 
+static struct irq_domain_ops octeon_irq_domain_cib_ops = {
+       .map = octeon_irq_cib_map,
+       .unmap = octeon_irq_free_cd,
+       .xlate = octeon_irq_cib_xlat,
+};
+
+/* Chain to real handler. */
+static irqreturn_t octeon_irq_cib_handler(int my_irq, void *data)
+{
+       u64 en;
+       u64 raw;
+       u64 bits;
+       int i;
+       int irq;
+       struct irq_domain *cib_domain = data;
+       struct octeon_irq_cib_host_data *host_data = cib_domain->host_data;
+
+       en = cvmx_read_csr(host_data->en_reg);
+       raw = cvmx_read_csr(host_data->raw_reg);
+
+       bits = en & raw;
+
+       for (i = 0; i < host_data->max_bits; i++) {
+               if ((bits & 1ull << i) == 0)
+                       continue;
+               irq = irq_find_mapping(cib_domain, i);
+               if (!irq) {
+                       unsigned long flags;
+
+                       pr_err("ERROR: CIB bit %d@%llx IRQ unhandled, disabling\n",
+                               i, host_data->raw_reg);
+                       raw_spin_lock_irqsave(&host_data->lock, flags);
+                       en = cvmx_read_csr(host_data->en_reg);
+                       en &= ~(1ull << i);
+                       cvmx_write_csr(host_data->en_reg, en);
+                       cvmx_write_csr(host_data->raw_reg, 1ull << i);
+                       raw_spin_unlock_irqrestore(&host_data->lock, flags);
+               } else {
+                       struct irq_desc *desc = irq_to_desc(irq);
+                       struct irq_data *irq_data = irq_desc_get_irq_data(desc);
+                       /* If edge, acknowledge the bit we will be sending. */
+                       if (irqd_get_trigger_type(irq_data) &
+                               IRQ_TYPE_EDGE_BOTH)
+                               cvmx_write_csr(host_data->raw_reg, 1ull << i);
+                       generic_handle_irq_desc(irq, desc);
+               }
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int __init octeon_irq_init_cib(struct device_node *ciu_node,
+                                     struct device_node *parent)
+{
+       const __be32 *addr;
+       u32 val;
+       struct octeon_irq_cib_host_data *host_data;
+       int parent_irq;
+       int r;
+       struct irq_domain *cib_domain;
+
+       parent_irq = irq_of_parse_and_map(ciu_node, 0);
+       if (!parent_irq) {
+               pr_err("ERROR: Couldn't acquire parent_irq for %s\n.",
+                       ciu_node->name);
+               return -EINVAL;
+       }
+
+       host_data = kzalloc(sizeof(*host_data), GFP_KERNEL);
+       raw_spin_lock_init(&host_data->lock);
+
+       addr = of_get_address(ciu_node, 0, NULL, NULL);
+       if (!addr) {
+               pr_err("ERROR: Couldn't acquire reg(0) %s\n.", ciu_node->name);
+               return -EINVAL;
+       }
+       host_data->raw_reg = (u64)phys_to_virt(
+               of_translate_address(ciu_node, addr));
+
+       addr = of_get_address(ciu_node, 1, NULL, NULL);
+       if (!addr) {
+               pr_err("ERROR: Couldn't acquire reg(1) %s\n.", ciu_node->name);
+               return -EINVAL;
+       }
+       host_data->en_reg = (u64)phys_to_virt(
+               of_translate_address(ciu_node, addr));
+
+       r = of_property_read_u32(ciu_node, "cavium,max-bits", &val);
+       if (r) {
+               pr_err("ERROR: Couldn't read cavium,max-bits from %s\n.",
+                       ciu_node->name);
+               return r;
+       }
+       host_data->max_bits = val;
+
+       cib_domain = irq_domain_add_linear(ciu_node, host_data->max_bits,
+                                          &octeon_irq_domain_cib_ops,
+                                          host_data);
+       if (!cib_domain) {
+               pr_err("ERROR: Couldn't irq_domain_add_linear()\n.");
+               return -ENOMEM;
+       }
+
+       cvmx_write_csr(host_data->en_reg, 0); /* disable all IRQs */
+       cvmx_write_csr(host_data->raw_reg, ~0); /* ack any outstanding */
+
+       r = request_irq(parent_irq, octeon_irq_cib_handler,
+                       IRQF_NO_THREAD, "cib", cib_domain);
+       if (r) {
+               pr_err("request_irq cib failed %d\n", r);
+               return r;
+       }
+       pr_info("CIB interrupt controller probed: %llx %d\n",
+               host_data->raw_reg, host_data->max_bits);
+       return 0;
+}
+
+static struct of_device_id ciu_types[] __initdata = {
+       {.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
+       {.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
+       {.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
+       {.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
+       {}
+};
+
 void __init arch_init_irq(void)
 {
 #ifdef CONFIG_SMP
@@ -1750,10 +2305,7 @@ void __init arch_init_irq(void)
        cpumask_clear(irq_default_affinity);
        cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
 #endif
-       if (OCTEON_IS_MODEL(OCTEON_CN68XX))
-               octeon_irq_init_ciu2();
-       else
-               octeon_irq_init_ciu();
+       of_irq_init(ciu_types);
 }
 
 asmlinkage void plat_irq_dispatch(void)
@@ -1767,13 +2319,13 @@ asmlinkage void plat_irq_dispatch(void)
                cop0_cause &= cop0_status;
                cop0_cause &= ST0_IM;
 
-               if (unlikely(cop0_cause & STATUSF_IP2))
+               if (cop0_cause & STATUSF_IP2)
                        octeon_irq_ip2();
-               else if (unlikely(cop0_cause & STATUSF_IP3))
+               else if (cop0_cause & STATUSF_IP3)
                        octeon_irq_ip3();
-               else if (unlikely(cop0_cause & STATUSF_IP4))
+               else if (cop0_cause & STATUSF_IP4)
                        octeon_irq_ip4();
-               else if (likely(cop0_cause))
+               else if (cop0_cause)
                        do_IRQ(fls(cop0_cause) - 9 + MIPS_CPU_IRQ_BASE);
                else
                        break;
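
The arch_init_irq() hunk above replaces the hard-coded CN68XX/CIU model check with a table-driven probe: of_irq_init() walks the device tree and calls the .data init routine of every node whose compatible string matches an entry in ciu_types. A minimal sketch of that pattern follows; my_intc_init, my_intc_match, and the "vendor,my-intc" compatible string are illustrative assumptions, not identifiers taken from the patch.

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_irq.h>

/* Illustrative init callback: called once per matching DT node. */
static int __init my_intc_init(struct device_node *node,
			       struct device_node *parent)
{
	/*
	 * Set up an irq_domain for 'node' here; 'parent' is the
	 * interrupt-parent node (NULL for a root controller).
	 */
	return 0;
}

static struct of_device_id my_intc_match[] __initdata = {
	{ .compatible = "vendor,my-intc", .data = my_intc_init },
	{}
};

void __init arch_init_irq(void)
{
	/*
	 * of_irq_init() orders root controllers before their children
	 * and invokes each matching init function exactly once.
	 */
	of_irq_init(my_intc_match);
}
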
index 94f888d3384e247b542b15faae2da18147088f24..a42110e7edbcabefdea9a011de9e032f88388a87 100644 (file)
@@ -41,6 +41,7 @@
 #include <asm/octeon/octeon.h>
 #include <asm/octeon/pci-octeon.h>
 #include <asm/octeon/cvmx-mio-defs.h>
+#include <asm/octeon/cvmx-rst-defs.h>
 
 extern struct plat_smp_ops octeon_smp_ops;
 
@@ -579,12 +580,10 @@ void octeon_user_io_init(void)
        /* R/W If set, CVMSEG is available for loads/stores in user
         * mode. */
        cvmmemctl.s.cvmsegenau = 0;
-       /* R/W Size of local memory in cache blocks, 54 (6912 bytes)
-        * is max legal value. */
-       cvmmemctl.s.lmemsz = CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE;
 
        write_c0_cvmmemctl(cvmmemctl.u64);
 
+       /* Setup of CVMSEG is done in kernel-entry-init.h */
        if (smp_processor_id() == 0)
                pr_notice("CVMSEG size: %d cache lines (%d bytes)\n",
                          CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
@@ -615,6 +614,7 @@ void __init prom_init(void)
        const char *arg;
        char *p;
        int i;
+       u64 t;
        int argc;
 #ifdef CONFIG_CAVIUM_RESERVE32
        int64_t addr = -1;
@@ -654,15 +654,56 @@ void __init prom_init(void)
        sysinfo->dfa_ref_clock_hz = octeon_bootinfo->dfa_ref_clock_hz;
        sysinfo->bootloader_config_flags = octeon_bootinfo->config_flags;
 
-       if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
+       if (OCTEON_IS_OCTEON2()) {
                /* I/O clock runs at a different rate than the CPU. */
                union cvmx_mio_rst_boot rst_boot;
                rst_boot.u64 = cvmx_read_csr(CVMX_MIO_RST_BOOT);
                octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
+       } else if (OCTEON_IS_OCTEON3()) {
+               /* I/O clock runs at a different rate than the CPU. */
+               union cvmx_rst_boot rst_boot;
+               rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
+               octeon_io_clock_rate = 50000000 * rst_boot.s.pnr_mul;
        } else {
                octeon_io_clock_rate = sysinfo->cpu_clock_hz;
        }
 
+       t = read_c0_cvmctl();
+       if ((t & (1ull << 27)) == 0) {
+               /*
+                * Setup the multiplier save/restore code if
+                * CvmCtl[NOMUL] clear.
+                */
+               void *save;
+               void *save_end;
+               void *restore;
+               void *restore_end;
+               int save_len;
+               int restore_len;
+               int save_max = (char *)octeon_mult_save_end -
+                       (char *)octeon_mult_save;
+               int restore_max = (char *)octeon_mult_restore_end -
+                       (char *)octeon_mult_restore;
+               if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON3) {
+                       save = octeon_mult_save3;
+                       save_end = octeon_mult_save3_end;
+                       restore = octeon_mult_restore3;
+                       restore_end = octeon_mult_restore3_end;
+               } else {
+                       save = octeon_mult_save2;
+                       save_end = octeon_mult_save2_end;
+                       restore = octeon_mult_restore2;
+                       restore_end = octeon_mult_restore2_end;
+               }
+               save_len = (char *)save_end - (char *)save;
+               restore_len = (char *)restore_end - (char *)restore;
+               if (!WARN_ON(save_len > save_max ||
+                               restore_len > restore_max)) {
+                       memcpy(octeon_mult_save, save, save_len);
+                       memcpy(octeon_mult_restore, restore, restore_len);
+               }
+       }
+
        /*
         * Only enable the LED controller if we're running on a CN38XX, CN58XX,
         * or CN56XX. The CN30XX and CN31XX don't have an LED controller.
@@ -1004,7 +1045,7 @@ EXPORT_SYMBOL(prom_putchar);
 
 void prom_free_prom_memory(void)
 {
-       if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X)) {
+       if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR) {
                /* Check for presence of Core-14449 fix.  */
                u32 insn;
                u32 *foo;
@@ -1026,8 +1067,9 @@ void prom_free_prom_memory(void)
                        panic("No PREF instruction at Core-14449 probe point.");
 
                if (((insn >> 16) & 0x1f) != 28)
-                       panic("Core-14449 WAR not in place (%04x).\n"
-                             "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).", insn);
+                       panic("OCTEON II DCache prefetch workaround not in place (%04x).\n"
+                             "Please build kernel with proper options (CONFIG_CAVIUM_CN63XXP1).",
+                             insn);
        }
 }
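
The CvmCtl[NOMUL] hunk above picks a CPU-specific multiplier save/restore stub and copies it over a fixed-size landing area, refusing the copy when the stub would overflow. A minimal sketch of that guard in isolation; the copy_stub_checked name is an assumption for illustration, not part of the patch.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>

/*
 * Copy a code/data stub into a fixed-size slot, but only if it fits.
 * Returns 0 on success, or -EINVAL (after a WARN) if the stub is too big.
 */
static int copy_stub_checked(void *slot, size_t slot_size,
			     const void *stub, size_t stub_len)
{
	if (WARN_ON(stub_len > slot_size))
		return -EINVAL;

	memcpy(slot, stub, stub_len);
	return 0;
}
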
 
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
new file mode 100644 (file)
index 0000000..4bce1f8
--- /dev/null
@@ -0,0 +1,193 @@
+CONFIG_MIPS_MALTA=y
+CONFIG_CPU_LITTLE_ENDIAN=y
+CONFIG_CPU_MIPS32_R6=y
+CONFIG_PAGE_SIZE_16KB=y
+CONFIG_HZ_100=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_SYSCTL_SYSCALL=y
+CONFIG_EMBEDDED=y
+CONFIG_SLAB=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_MODULE_SRCVERSION_ALL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PCI=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_XFRM_USER=m
+CONFIG_NET_KEY=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_IP_ROUTE_MULTIPATH=y
+CONFIG_IP_ROUTE_VERBOSE=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_NET_IPIP=m
+CONFIG_IP_MROUTE=y
+CONFIG_IP_PIMSM_V1=y
+CONFIG_IP_PIMSM_V2=y
+CONFIG_SYN_COOKIES=y
+CONFIG_INET_AH=m
+CONFIG_INET_ESP=m
+CONFIG_INET_IPCOMP=m
+# CONFIG_INET_LRO is not set
+CONFIG_INET6_AH=m
+CONFIG_INET6_ESP=m
+CONFIG_INET6_IPCOMP=m
+CONFIG_IPV6_TUNNEL=m
+CONFIG_BRIDGE=m
+CONFIG_VLAN_8021Q=m
+CONFIG_ATALK=m
+CONFIG_DEV_APPLETALK=m
+CONFIG_IPDDP=m
+CONFIG_IPDDP_ENCAP=y
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_CBQ=m
+CONFIG_NET_SCH_HTB=m
+CONFIG_NET_SCH_HFSC=m
+CONFIG_NET_SCH_PRIO=m
+CONFIG_NET_SCH_RED=m
+CONFIG_NET_SCH_SFQ=m
+CONFIG_NET_SCH_TEQL=m
+CONFIG_NET_SCH_TBF=m
+CONFIG_NET_SCH_GRED=m
+CONFIG_NET_SCH_DSMARK=m
+CONFIG_NET_SCH_NETEM=m
+CONFIG_NET_SCH_INGRESS=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_TCINDEX=m
+CONFIG_NET_CLS_ROUTE4=m
+CONFIG_NET_CLS_FW=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_CLS_RSVP=m
+CONFIG_NET_CLS_RSVP6=m
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_CLS_IND=y
+# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_IDE=y
+# CONFIG_IDE_PROC_FS is not set
+# CONFIG_IDEPCI_PCIBUS_ORDER is not set
+CONFIG_BLK_DEV_GENERIC=y
+CONFIG_BLK_DEV_PIIX=y
+CONFIG_SCSI=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_CHR_DEV_SG=y
+# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_NETDEVICES=y
+# CONFIG_NET_VENDOR_3COM is not set
+# CONFIG_NET_VENDOR_ADAPTEC is not set
+# CONFIG_NET_VENDOR_ALTEON is not set
+CONFIG_PCNET32=y
+# CONFIG_NET_VENDOR_ATHEROS is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_BROCADE is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_CISCO is not set
+# CONFIG_NET_VENDOR_DEC is not set
+# CONFIG_NET_VENDOR_DLINK is not set
+# CONFIG_NET_VENDOR_EMULEX is not set
+# CONFIG_NET_VENDOR_EXAR is not set
+# CONFIG_NET_VENDOR_HP is not set
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MELLANOX is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MYRI is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NVIDIA is not set
+# CONFIG_NET_VENDOR_OKI is not set
+# CONFIG_NET_PACKET_ENGINE is not set
+# CONFIG_NET_VENDOR_QLOGIC is not set
+# CONFIG_NET_VENDOR_REALTEK is not set
+# CONFIG_NET_VENDOR_RDC is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+# CONFIG_NET_VENDOR_SILAN is not set
+# CONFIG_NET_VENDOR_SIS is not set
+# CONFIG_NET_VENDOR_SMSC is not set
+# CONFIG_NET_VENDOR_STMICRO is not set
+# CONFIG_NET_VENDOR_SUN is not set
+# CONFIG_NET_VENDOR_TEHUTI is not set
+# CONFIG_NET_VENDOR_TI is not set
+# CONFIG_NET_VENDOR_TOSHIBA is not set
+# CONFIG_NET_VENDOR_VIA is not set
+# CONFIG_NET_VENDOR_WIZNET is not set
+# CONFIG_WLAN is not set
+# CONFIG_VT is not set
+CONFIG_LEGACY_PTY_COUNT=4
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_HW_RANDOM=y
+# CONFIG_HWMON is not set
+CONFIG_FB=y
+CONFIG_FIRMWARE_EDID=y
+CONFIG_FB_MATROX=y
+CONFIG_FB_MATROX_G=y
+CONFIG_USB=y
+CONFIG_USB_EHCI_HCD=y
+# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_UHCI_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
+CONFIG_LEDS_TRIGGER_BACKLIGHT=y
+CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_DRV_CMOS=y
+CONFIG_EXT2_FS=y
+CONFIG_EXT3_FS=y
+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
+CONFIG_XFS_FS=y
+CONFIG_XFS_QUOTA=y
+CONFIG_XFS_POSIX_ACL=y
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_MSDOS_FS=m
+CONFIG_VFAT_FS=m
+CONFIG_PROC_KCORE=y
+CONFIG_TMPFS=y
+CONFIG_NFS_FS=y
+CONFIG_ROOT_NFS=y
+CONFIG_CIFS=m
+CONFIG_CIFS_WEAK_PW_HASH=y
+CONFIG_CIFS_XATTR=y
+CONFIG_CIFS_POSIX=y
+CONFIG_NLS_CODEPAGE_437=m
+CONFIG_NLS_ISO8859_1=m
+# CONFIG_FTRACE is not set
+CONFIG_CRYPTO_NULL=m
+CONFIG_CRYPTO_PCBC=m
+CONFIG_CRYPTO_HMAC=y
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_TGR192=m
+CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_ANUBIS=m
+CONFIG_CRYPTO_BLOWFISH=m
+CONFIG_CRYPTO_CAST5=m
+CONFIG_CRYPTO_CAST6=m
+CONFIG_CRYPTO_KHAZAD=m
+CONFIG_CRYPTO_SERPENT=m
+CONFIG_CRYPTO_TEA=m
+CONFIG_CRYPTO_TWOFISH=m
+# CONFIG_CRYPTO_ANSI_CPRNG is not set
+# CONFIG_CRYPTO_HW is not set
index f9f5307434c276a35624f5c137333867e2357794..19f710117d974bf2a0da97764e7bc3ec428b3fc8 100644 (file)
@@ -9,6 +9,7 @@
  * Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
  * Copyright (C) 1999 Silicon Graphics, Inc.
  */
+#include <linux/compiler.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/irqflags.h>
 #include <asm/sgialib.h>
 #include <asm/bootinfo.h>
 
-VOID
+VOID __noreturn
 ArcHalt(VOID)
 {
        bc_disable();
        local_irq_disable();
        ARC_CALL0(halt);
-never: goto never;
+
+       unreachable();
 }
 
-VOID
+VOID __noreturn
 ArcPowerDown(VOID)
 {
        bc_disable();
        local_irq_disable();
        ARC_CALL0(pdown);
-never: goto never;
+
+       unreachable();
 }
 
 /* XXX is this a soft reset basically? XXX */
-VOID
+VOID __noreturn
 ArcRestart(VOID)
 {
        bc_disable();
        local_irq_disable();
        ARC_CALL0(restart);
-never: goto never;
+
+       unreachable();
 }
 
-VOID
+VOID __noreturn
 ArcReboot(VOID)
 {
        bc_disable();
        local_irq_disable();
        ARC_CALL0(reboot);
-never: goto never;
+
+       unreachable();
 }
 
-VOID
+VOID __noreturn
 ArcEnterInteractiveMode(VOID)
 {
        bc_disable();
        local_irq_disable();
        ARC_CALL0(imode);
-never: goto never;
+
+       unreachable();
 }
 
 LONG
index 200efeac41813c2a17e237156c6410253f488d8b..526539cbc99f6792b21d35dbfc1d044f0f817a87 100644 (file)
@@ -1,4 +1,5 @@
 # MIPS headers
+generic-$(CONFIG_GENERIC_CSUM) += checksum.h
 generic-y += cputime.h
 generic-y += current.h
 generic-y += dma-contiguous.h
index 6caf8766b80f161ea7a620ecc33cca7768e1d4b0..0cae4595e985bbc3d8043b3bb85aef66c582615b 100644 (file)
@@ -19,7 +19,7 @@
 #include <asm/asmmacro-64.h>
 #endif
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        .macro  local_irq_enable reg=t0
        ei
        irq_enable_hazard
        .endm
 
        .macro  fpu_save_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+               defined(CONFIG_CPU_MIPS32_R6)
        sll     \tmp, \status, 5
        bgez    \tmp, 10f
        fpu_save_16odd \thread
        .endm
 
        .macro  fpu_restore_double thread status tmp
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+               defined(CONFIG_CPU_MIPS32_R6)
        sll     \tmp, \status, 5
        bgez    \tmp, 10f                               # 16 register mode?
 
        fpu_restore_16even \thread \tmp
        .endm
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        .macro  _EXT    rd, rs, p, s
        ext     \rd, \rs, \p, \s
        .endm
-#else /* !CONFIG_CPU_MIPSR2 */
+#else /* !CONFIG_CPU_MIPSR2 || !CONFIG_CPU_MIPSR6 */
        .macro  _EXT    rd, rs, p, s
        srl     \rd, \rs, \p
        andi    \rd, \rd, (1 << \s) - 1
        .endm
-#endif /* !CONFIG_CPU_MIPSR2 */
+#endif /* !CONFIG_CPU_MIPSR2 || !CONFIG_CPU_MIPSR6 */
 
 /*
  * Temporary until all gas have MT ASE support
        .set    push
        .set    noat
        SET_HARDFLOAT
-       add     $1, \base, \off
+       addu    $1, \base, \off
        .word   LDD_MSA_INSN | (\wd << 6)
        .set    pop
        .endm
        .set    push
        .set    noat
        SET_HARDFLOAT
-       add     $1, \base, \off
+       addu    $1, \base, \off
        .word   STD_MSA_INSN | (\wd << 6)
        .set    pop
        .endm
index 857da84cfc92eb20bd7f29cb5d9b3c1e16b86203..26d436336f2e18c9e8a3e5fd888e2f97bdb0a2dd 100644 (file)
@@ -54,19 +54,19 @@ static __inline__ void atomic_##op(int i, atomic_t * v)                           \
                "       sc      %0, %1                                  \n"   \
                "       beqzl   %0, 1b                                  \n"   \
                "       .set    mips0                                   \n"   \
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)              \
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)          \
                : "Ir" (i));                                                  \
        } else if (kernel_uses_llsc) {                                        \
                int temp;                                                     \
                                                                              \
                do {                                                          \
                        __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
+                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
                        "       ll      %0, %1          # atomic_" #op "\n"   \
                        "       " #asm_op " %0, %2                      \n"   \
                        "       sc      %0, %1                          \n"   \
                        "       .set    mips0                           \n"   \
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)      \
                        : "Ir" (i));                                          \
                } while (unlikely(!temp));                                    \
        } else {                                                              \
@@ -97,20 +97,20 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v)                   \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       .set    mips0                                   \n"   \
                : "=&r" (result), "=&r" (temp),                               \
-                 "+" GCC_OFF12_ASM() (v->counter)                            \
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)                        \
                : "Ir" (i));                                                  \
        } else if (kernel_uses_llsc) {                                        \
                int temp;                                                     \
                                                                              \
                do {                                                          \
                        __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
+                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
                        "       ll      %1, %2  # atomic_" #op "_return \n"   \
                        "       " #asm_op " %0, %1, %3                  \n"   \
                        "       sc      %0, %2                          \n"   \
                        "       .set    mips0                           \n"   \
                        : "=&r" (result), "=&r" (temp),                       \
-                         "+" GCC_OFF12_ASM() (v->counter)                    \
+                         "+" GCC_OFF_SMALL_ASM() (v->counter)                \
                        : "Ir" (i));                                          \
                } while (unlikely(!result));                                  \
                                                                              \
@@ -171,14 +171,14 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp),
-                 "+" GCC_OFF12_ASM() (v->counter)
-               : "Ir" (i), GCC_OFF12_ASM() (v->counter)
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)
+               : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
                : "memory");
        } else if (kernel_uses_llsc) {
                int temp;
 
                __asm__ __volatile__(
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_LEVEL"                        \n"
                "1:     ll      %1, %2          # atomic_sub_if_positive\n"
                "       subu    %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
@@ -190,7 +190,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp),
-                 "+" GCC_OFF12_ASM() (v->counter)
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)
                : "Ir" (i));
        } else {
                unsigned long flags;
@@ -333,19 +333,19 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v)                    \
                "       scd     %0, %1                                  \n"   \
                "       beqzl   %0, 1b                                  \n"   \
                "       .set    mips0                                   \n"   \
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)              \
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)          \
                : "Ir" (i));                                                  \
        } else if (kernel_uses_llsc) {                                        \
                long temp;                                                    \
                                                                              \
                do {                                                          \
                        __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
+                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
                        "       lld     %0, %1          # atomic64_" #op "\n" \
                        "       " #asm_op " %0, %2                      \n"   \
                        "       scd     %0, %1                          \n"   \
                        "       .set    mips0                           \n"   \
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter)      \
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter)      \
                        : "Ir" (i));                                          \
                } while (unlikely(!temp));                                    \
        } else {                                                              \
@@ -376,21 +376,21 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v)           \
                "       " #asm_op " %0, %1, %3                          \n"   \
                "       .set    mips0                                   \n"   \
                : "=&r" (result), "=&r" (temp),                               \
-                 "+" GCC_OFF12_ASM() (v->counter)                            \
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)                        \
                : "Ir" (i));                                                  \
        } else if (kernel_uses_llsc) {                                        \
                long temp;                                                    \
                                                                              \
                do {                                                          \
                        __asm__ __volatile__(                                 \
-                       "       .set    arch=r4000                      \n"   \
+                       "       .set    "MIPS_ISA_LEVEL"                \n"   \
                        "       lld     %1, %2  # atomic64_" #op "_return\n"  \
                        "       " #asm_op " %0, %1, %3                  \n"   \
                        "       scd     %0, %2                          \n"   \
                        "       .set    mips0                           \n"   \
                        : "=&r" (result), "=&r" (temp),                       \
-                         "=" GCC_OFF12_ASM() (v->counter)                    \
-                       : "Ir" (i), GCC_OFF12_ASM() (v->counter)              \
+                         "=" GCC_OFF_SMALL_ASM() (v->counter)                \
+                       : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)          \
                        : "memory");                                          \
                } while (unlikely(!result));                                  \
                                                                              \
@@ -452,14 +452,14 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp),
-                 "=" GCC_OFF12_ASM() (v->counter)
-               : "Ir" (i), GCC_OFF12_ASM() (v->counter)
+                 "=" GCC_OFF_SMALL_ASM() (v->counter)
+               : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter)
                : "memory");
        } else if (kernel_uses_llsc) {
                long temp;
 
                __asm__ __volatile__(
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_LEVEL"                        \n"
                "1:     lld     %1, %2          # atomic64_sub_if_positive\n"
                "       dsubu   %0, %1, %3                              \n"
                "       bltz    %0, 1f                                  \n"
@@ -471,7 +471,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                "1:                                                     \n"
                "       .set    mips0                                   \n"
                : "=&r" (result), "=&r" (temp),
-                 "+" GCC_OFF12_ASM() (v->counter)
+                 "+" GCC_OFF_SMALL_ASM() (v->counter)
                : "Ir" (i));
        } else {
                unsigned long flags;
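
The atomic.h hunks above only retarget the `.set arch=r4000` directives to MIPS_ISA_LEVEL and widen the offset-constraint macro; the load-linked/store-conditional retry structure itself is unchanged. As a rough userspace analogue of that retry loop, using C11 atomics rather than the kernel's MIPS assembly (illustration only):

#include <stdatomic.h>
#include <stdio.h>

/* Read the old value, compute the update, retry until the store lands
 * without interference -- the same shape as the ll/sc loops above. */
static int atomic_add_return_like(atomic_int *v, int i)
{
	int old, new;

	do {
		old = atomic_load(v);                 /* "ll": observe current value */
		new = old + i;                        /* the operation               */
	} while (!atomic_compare_exchange_weak(v, &old, new)); /* "sc": retry on failure */

	return new;
}

int main(void)
{
	atomic_int v = 40;
	printf("%d\n", atomic_add_return_like(&v, 2));  /* prints 42 */
	return 0;
}
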
index 6663bcca9d0c626886529ae5eb7a92e75b75cd46..9f935f6aa996ddfd573b9ccb4f6261118934a415 100644 (file)
@@ -79,28 +79,28 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
                "       " __SC  "%0, %1                                 \n"
                "       beqzl   %0, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "=" GCC_OFF12_ASM() (*m)
-               : "ir" (1UL << bit), GCC_OFF12_ASM() (*m));
-#ifdef CONFIG_CPU_MIPSR2
+               : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*m)
+               : "ir" (1UL << bit), GCC_OFF_SMALL_ASM() (*m));
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
                do {
                        __asm__ __volatile__(
                        "       " __LL "%0, %1          # set_bit       \n"
                        "       " __INS "%0, %3, %2, 1                  \n"
                        "       " __SC "%0, %1                          \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (bit), "r" (~0));
                } while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
        } else if (kernel_uses_llsc) {
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1          # set_bit       \n"
                        "       or      %0, %2                          \n"
                        "       " __SC  "%0, %1                         \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (1UL << bit));
                } while (unlikely(!temp));
        } else
@@ -131,28 +131,28 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
                "       " __SC "%0, %1                                  \n"
                "       beqzl   %0, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                : "ir" (~(1UL << bit)));
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
                do {
                        __asm__ __volatile__(
                        "       " __LL "%0, %1          # clear_bit     \n"
                        "       " __INS "%0, $0, %2, 1                  \n"
                        "       " __SC "%0, %1                          \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (bit));
                } while (unlikely(!temp));
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
        } else if (kernel_uses_llsc) {
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1          # clear_bit     \n"
                        "       and     %0, %2                          \n"
                        "       " __SC "%0, %1                          \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (~(1UL << bit)));
                } while (unlikely(!temp));
        } else
@@ -197,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
                "       " __SC  "%0, %1                         \n"
                "       beqzl   %0, 1b                          \n"
                "       .set    mips0                           \n"
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                : "ir" (1UL << bit));
        } else if (kernel_uses_llsc) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -205,12 +205,12 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1          # change_bit    \n"
                        "       xor     %0, %2                          \n"
                        "       " __SC  "%0, %1                         \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m)
                        : "ir" (1UL << bit));
                } while (unlikely(!temp));
        } else
@@ -245,7 +245,7 @@ static inline int test_and_set_bit(unsigned long nr,
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                : "r" (1UL << bit)
                : "memory");
        } else if (kernel_uses_llsc) {
@@ -254,12 +254,12 @@ static inline int test_and_set_bit(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1  # test_and_set_bit      \n"
                        "       or      %2, %0, %3                      \n"
                        "       " __SC  "%2, %1                         \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));
@@ -308,12 +308,12 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL "%0, %1  # test_and_set_bit      \n"
                        "       or      %2, %0, %3                      \n"
                        "       " __SC  "%2, %1                         \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));
@@ -355,10 +355,10 @@ static inline int test_and_clear_bit(unsigned long nr,
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                : "r" (1UL << bit)
                : "memory");
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        } else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
                unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
                unsigned long temp;
@@ -369,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr,
                        "       " __EXT "%2, %0, %3, 1                  \n"
                        "       " __INS "%0, $0, %3, 1                  \n"
                        "       " __SC  "%0, %1                         \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "ir" (bit)
                        : "memory");
                } while (unlikely(!temp));
@@ -380,13 +380,13 @@ static inline int test_and_clear_bit(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL  "%0, %1 # test_and_clear_bit    \n"
                        "       or      %2, %0, %3                      \n"
                        "       xor     %2, %3                          \n"
                        "       " __SC  "%2, %1                         \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));
@@ -428,7 +428,7 @@ static inline int test_and_change_bit(unsigned long nr,
                "       beqzl   %2, 1b                                  \n"
                "       and     %2, %0, %3                              \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+               : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                : "r" (1UL << bit)
                : "memory");
        } else if (kernel_uses_llsc) {
@@ -437,12 +437,12 @@ static inline int test_and_change_bit(unsigned long nr,
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       " __LL  "%0, %1 # test_and_change_bit   \n"
                        "       xor     %2, %0, %3                      \n"
                        "       " __SC  "\t%2, %1                       \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
+                       : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (*m), "=&r" (res)
                        : "r" (1UL << bit)
                        : "memory");
                } while (unlikely(!res));
@@ -485,7 +485,7 @@ static inline unsigned long __fls(unsigned long word)
            __builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
                __asm__(
                "       .set    push                                    \n"
-               "       .set    mips32                                  \n"
+               "       .set    "MIPS_ISA_LEVEL"                        \n"
                "       clz     %0, %1                                  \n"
                "       .set    pop                                     \n"
                : "=r" (num)
@@ -498,7 +498,7 @@ static inline unsigned long __fls(unsigned long word)
            __builtin_constant_p(cpu_has_mips64) && cpu_has_mips64) {
                __asm__(
                "       .set    push                                    \n"
-               "       .set    mips64                                  \n"
+               "       .set    "MIPS_ISA_LEVEL"                        \n"
                "       dclz    %0, %1                                  \n"
                "       .set    pop                                     \n"
                : "=r" (num)
@@ -562,7 +562,7 @@ static inline int fls(int x)
        if (__builtin_constant_p(cpu_has_clo_clz) && cpu_has_clo_clz) {
                __asm__(
                "       .set    push                                    \n"
-               "       .set    mips32                                  \n"
+               "       .set    "MIPS_ISA_LEVEL"                        \n"
                "       clz     %0, %1                                  \n"
                "       .set    pop                                     \n"
                : "=r" (x)
index 3418c51e11512ed2a3957448fbb68d83ebac1858..5c585c5c1c3e3fe3ee6bacdfd8ce0f59be82e27e 100644 (file)
 #ifndef _ASM_CHECKSUM_H
 #define _ASM_CHECKSUM_H
 
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
+
 #include <linux/in6.h>
 
 #include <asm/uaccess.h>
@@ -99,27 +103,23 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len,
  */
 __wsum csum_partial_copy_nocheck(const void *src, void *dst,
                                       int len, __wsum sum);
+#define csum_partial_copy_nocheck csum_partial_copy_nocheck
 
 /*
  *     Fold a partial checksum without adding pseudo headers
  */
-static inline __sum16 csum_fold(__wsum sum)
+static inline __sum16 csum_fold(__wsum csum)
 {
-       __asm__(
-       "       .set    push            # csum_fold\n"
-       "       .set    noat            \n"
-       "       sll     $1, %0, 16      \n"
-       "       addu    %0, $1          \n"
-       "       sltu    $1, %0, $1      \n"
-       "       srl     %0, %0, 16      \n"
-       "       addu    %0, $1          \n"
-       "       xori    %0, 0xffff      \n"
-       "       .set    pop"
-       : "=r" (sum)
-       : "0" (sum));
+       u32 sum = (__force u32)csum;
 
-       return (__force __sum16)sum;
+       sum += (sum << 16);
+       csum = (sum < csum);
+       sum >>= 16;
+       sum += csum;
+
+       return (__force __sum16)~sum;
 }
+#define csum_fold csum_fold
 
 /*
  *     This is a version of ip_compute_csum() optimized for IP headers,
@@ -158,6 +158,7 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
 
        return csum_fold(csum);
 }
+#define ip_fast_csum ip_fast_csum
 
 static inline __wsum csum_tcpudp_nofold(__be32 saddr,
        __be32 daddr, unsigned short len, unsigned short proto,
@@ -200,18 +201,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr,
 
        return sum;
 }
-
-/*
- * computes the checksum of the TCP/UDP pseudo-header
- * returns a 16-bit checksum, already complemented
- */
-static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-                                                  unsigned short len,
-                                                  unsigned short proto,
-                                                  __wsum sum)
-{
-       return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
-}
+#define csum_tcpudp_nofold csum_tcpudp_nofold
 
 /*
  * this routine is used for miscellaneous IP-like checksums, mainly
@@ -287,4 +277,7 @@ static __inline__ __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
        return csum_fold(sum);
 }
 
+#include <asm-generic/checksum.h>
+#endif /* CONFIG_GENERIC_CSUM */
+
 #endif /* _ASM_CHECKSUM_H */
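
The new csum_fold() above folds a 32-bit partial sum into 16 bits by adding the value to itself shifted left by 16, propagating the end-around carry, and complementing the result. A standalone sketch of the same arithmetic in plain C, with invented names (not the kernel header):

#include <stdint.h>
#include <stdio.h>

/* Fold a 32-bit one's-complement sum to 16 bits with end-around carry. */
static uint16_t fold_checksum(uint32_t csum)
{
	uint32_t sum = csum;
	uint32_t carry;

	sum += sum << 16;       /* add low half into high half              */
	carry = (sum < csum);   /* unsigned wrap-around => carry out        */
	sum >>= 16;             /* keep the folded high half                */
	sum += carry;           /* end-around carry                         */

	return (uint16_t)~sum;  /* one's-complement of the folded sum       */
}

int main(void)
{
	printf("0x%04x\n", fold_checksum(0x0001fffeu)); /* example input */
	return 0;
}
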
index 28b1edf195016b80a89854b9ae11c28a1a9c04ce..d0a2a68ca600670ead4d5535751267f6889e93f4 100644 (file)
@@ -31,24 +31,24 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
                "       sc      %2, %1                                  \n"
                "       beqzl   %2, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
-               : GCC_OFF12_ASM() (*m), "Jr" (val)
+               : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
+               : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
                : "memory");
        } else if (kernel_uses_llsc) {
                unsigned long dummy;
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       ll      %0, %3          # xchg_u32      \n"
                        "       .set    mips0                           \n"
                        "       move    %2, %z4                         \n"
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       sc      %2, %1                          \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+                       : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
                          "=&r" (dummy)
-                       : GCC_OFF12_ASM() (*m), "Jr" (val)
+                       : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
                        : "memory");
                } while (unlikely(!dummy));
        } else {
@@ -82,22 +82,22 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
                "       scd     %2, %1                                  \n"
                "       beqzl   %2, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
-               : GCC_OFF12_ASM() (*m), "Jr" (val)
+               : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m), "=&r" (dummy)
+               : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
                : "memory");
        } else if (kernel_uses_llsc) {
                unsigned long dummy;
 
                do {
                        __asm__ __volatile__(
-                       "       .set    arch=r4000                      \n"
+                       "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"
                        "       lld     %0, %3          # xchg_u64      \n"
                        "       move    %2, %z4                         \n"
                        "       scd     %2, %1                          \n"
                        "       .set    mips0                           \n"
-                       : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
+                       : "=&r" (retval), "=" GCC_OFF_SMALL_ASM() (*m),
                          "=&r" (dummy)
-                       : GCC_OFF12_ASM() (*m), "Jr" (val)
+                       : GCC_OFF_SMALL_ASM() (*m), "Jr" (val)
                        : "memory");
                } while (unlikely(!dummy));
        } else {
@@ -158,25 +158,25 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
                "       beqzl   $1, 1b                          \n"     \
                "2:                                             \n"     \
                "       .set    pop                             \n"     \
-               : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m)               \
-               : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new)          \
+               : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)           \
+               : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)              \
                : "memory");                                            \
        } else if (kernel_uses_llsc) {                                  \
                __asm__ __volatile__(                                   \
                "       .set    push                            \n"     \
                "       .set    noat                            \n"     \
-               "       .set    arch=r4000                      \n"     \
+               "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"     \
                "1:     " ld "  %0, %2          # __cmpxchg_asm \n"     \
                "       bne     %0, %z3, 2f                     \n"     \
                "       .set    mips0                           \n"     \
                "       move    $1, %z4                         \n"     \
-               "       .set    arch=r4000                      \n"     \
+               "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"     \
                "       " st "  $1, %1                          \n"     \
                "       beqz    $1, 1b                          \n"     \
                "       .set    pop                             \n"     \
                "2:                                             \n"     \
-               : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m)               \
-               : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new)          \
+               : "=&r" (__ret), "=" GCC_OFF_SMALL_ASM() (*m)           \
+               : GCC_OFF_SMALL_ASM() (*m), "Jr" (old), "Jr" (new)              \
                : "memory");                                            \
        } else {                                                        \
                unsigned long __flags;                                  \
index c73815e0123a756bc0579806c7e8333b26712d08..e081a265f4227475d17fef0d53cf07f7e9ffb6a3 100644 (file)
 #define GCC_REG_ACCUM "accum"
 #endif
 
+#ifdef CONFIG_CPU_MIPSR6
+/* All MIPS R6 toolchains support the ZC constraint */
+#define GCC_OFF_SMALL_ASM() "ZC"
+#else
 #ifndef CONFIG_CPU_MICROMIPS
-#define GCC_OFF12_ASM() "R"
+#define GCC_OFF_SMALL_ASM() "R"
 #elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
-#define GCC_OFF12_ASM() "ZC"
+#define GCC_OFF_SMALL_ASM() "ZC"
 #else
 #error "microMIPS compilation unsupported with GCC older than 4.9"
-#endif
+#endif /* CONFIG_CPU_MICROMIPS */
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#ifdef CONFIG_CPU_MIPSR6
+#define MIPS_ISA_LEVEL "mips64r6"
+#define MIPS_ISA_ARCH_LEVEL MIPS_ISA_LEVEL
+#define MIPS_ISA_LEVEL_RAW mips64r6
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#else
+/* MIPS64 is a superset of MIPS32 */
+#define MIPS_ISA_LEVEL "mips64r2"
+#define MIPS_ISA_ARCH_LEVEL "arch=r4000"
+#define MIPS_ISA_LEVEL_RAW mips64r2
+#define MIPS_ISA_ARCH_LEVEL_RAW MIPS_ISA_LEVEL_RAW
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #endif /* _ASM_COMPILER_H */
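
MIPS_ISA_LEVEL is defined as a string literal so it can be spliced into inline-asm templates (as in the `.set "MIPS_ISA_LEVEL"` changes throughout this series) by ordinary string-literal concatenation, while the _RAW variants stay bare tokens for .S files. A small illustration of the concatenation mechanism; the config test is mimicked here with a plain #define:

#include <stdio.h>

/* Mimic the kernel's selection: define CPU_MIPSR6 to get the R6 string. */
#ifdef CPU_MIPSR6
#define MIPS_ISA_LEVEL "mips64r6"
#else
#define MIPS_ISA_LEVEL "mips64r2"
#endif

int main(void)
{
	/* Adjacent string literals are concatenated at compile time, so this
	 * is exactly the template text an asm() statement would receive. */
	fputs("\t.set\t" MIPS_ISA_LEVEL "\n", stdout);
	return 0;
}
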
index 2897cfafcaf097f01e17a561488eafe032924933..0d8208de9a3fadaff9308544af2c955f7b967a34 100644 (file)
@@ -38,6 +38,9 @@
 #ifndef cpu_has_maar
 #define cpu_has_maar           (cpu_data[0].options & MIPS_CPU_MAAR)
 #endif
+#ifndef cpu_has_rw_llb
+#define cpu_has_rw_llb         (cpu_data[0].options & MIPS_CPU_RW_LLB)
+#endif
 
 /*
  * For the moment we don't consider R6000 and R8000 so we can assume that
 #endif
 #endif
 
+#ifndef cpu_has_mips_1
+# define cpu_has_mips_1                (!cpu_has_mips_r6)
+#endif
 #ifndef cpu_has_mips_2
 # define cpu_has_mips_2                (cpu_data[0].isa_level & MIPS_CPU_ISA_II)
 #endif
 #ifndef cpu_has_mips32r2
 # define cpu_has_mips32r2      (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R2)
 #endif
+#ifndef cpu_has_mips32r6
+# define cpu_has_mips32r6      (cpu_data[0].isa_level & MIPS_CPU_ISA_M32R6)
+#endif
 #ifndef cpu_has_mips64r1
 # define cpu_has_mips64r1      (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R1)
 #endif
 #ifndef cpu_has_mips64r2
 # define cpu_has_mips64r2      (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R2)
 #endif
+#ifndef cpu_has_mips64r6
+# define cpu_has_mips64r6      (cpu_data[0].isa_level & MIPS_CPU_ISA_M64R6)
+#endif
 
 /*
  * Shortcuts ...
 #define cpu_has_mips_4_5_r     (cpu_has_mips_4 | cpu_has_mips_5_r)
 #define cpu_has_mips_5_r       (cpu_has_mips_5 | cpu_has_mips_r)
 
-#define cpu_has_mips_4_5_r2    (cpu_has_mips_4_5 | cpu_has_mips_r2)
+#define cpu_has_mips_4_5_r2_r6 (cpu_has_mips_4_5 | cpu_has_mips_r2 | \
+                                cpu_has_mips_r6)
 
-#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2)
-#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2)
+#define cpu_has_mips32 (cpu_has_mips32r1 | cpu_has_mips32r2 | cpu_has_mips32r6)
+#define cpu_has_mips64 (cpu_has_mips64r1 | cpu_has_mips64r2 | cpu_has_mips64r6)
 #define cpu_has_mips_r1 (cpu_has_mips32r1 | cpu_has_mips64r1)
 #define cpu_has_mips_r2 (cpu_has_mips32r2 | cpu_has_mips64r2)
+#define cpu_has_mips_r6        (cpu_has_mips32r6 | cpu_has_mips64r6)
 #define cpu_has_mips_r (cpu_has_mips32r1 | cpu_has_mips32r2 | \
-                        cpu_has_mips64r1 | cpu_has_mips64r2)
+                        cpu_has_mips32r6 | cpu_has_mips64r1 | \
+                        cpu_has_mips64r2 | cpu_has_mips64r6)
+
+/* MIPSR2 and MIPSR6 have a lot of similarities */
+#define cpu_has_mips_r2_r6     (cpu_has_mips_r2 | cpu_has_mips_r6)
 
 #ifndef cpu_has_mips_r2_exec_hazard
-#define cpu_has_mips_r2_exec_hazard cpu_has_mips_r2
+#define cpu_has_mips_r2_exec_hazard (cpu_has_mips_r2 | cpu_has_mips_r6)
 #endif
 
 /*
index a6c9ccb33c5c9a35ceaac1485fb10f02a97da4b2..c3f4f2d2e1088459b2aa10c6292e5de76665d5bc 100644 (file)
@@ -84,6 +84,11 @@ struct cpuinfo_mips {
         * (shifted by _CACHE_SHIFT)
         */
        unsigned int            writecombine;
+       /*
+        * Simple counter to prevent enabling HTW in nested
+        * htw_start/htw_stop calls
+        */
+       unsigned int            htw_seq;
 } __attribute__((aligned(SMP_CACHE_BYTES)));
 
 extern struct cpuinfo_mips cpu_data[];
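
The htw_seq field added above acts as a nesting counter: the hardware walker is actually disabled on the first stop and re-enabled only when the outermost matching start runs. A toy sketch of that counting discipline, with names invented for the illustration:

#include <stdio.h>

static unsigned int seq;    /* plays the role of htw_seq */

static void walker_stop(void)
{
	if (seq++ == 0)
		puts("walker disabled");   /* only the outermost stop acts */
}

static void walker_start(void)
{
	if (--seq == 0)
		puts("walker enabled");    /* only the outermost start acts */
}

int main(void)
{
	walker_stop();     /* outer critical section  */
	walker_stop();     /* nested critical section */
	walker_start();    /* still disabled          */
	walker_start();    /* re-enabled here         */
	return 0;
}
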
index b4e2bd87df5030457b2f397b7e52565b7d2dc4b5..8245875f8b33be3156cfe42b63b0f1d788afebf8 100644 (file)
@@ -54,6 +54,13 @@ static inline int __pure __get_cpu_type(const int cpu_type)
        case CPU_M5150:
 #endif
 
+#if defined(CONFIG_SYS_HAS_CPU_MIPS32_R2) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS32_R6) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS64_R2) || \
+    defined(CONFIG_SYS_HAS_CPU_MIPS64_R6)
+       case CPU_QEMU_GENERIC:
+#endif
+
 #ifdef CONFIG_SYS_HAS_CPU_MIPS64_R1
        case CPU_5KC:
        case CPU_5KE:
index 33866fce4d633a177636c1d386e180bdbd8ef02a..15687234d70a6bcd1dd211543fa8ac494ca44902 100644 (file)
@@ -93,6 +93,7 @@
  * These are the PRID's for when 23:16 == PRID_COMP_MIPS
  */
 
+#define PRID_IMP_QEMU_GENERIC  0x0000
 #define PRID_IMP_4KC           0x8000
 #define PRID_IMP_5KC           0x8100
 #define PRID_IMP_20KC          0x8200
@@ -312,6 +313,8 @@ enum cpu_type_enum {
        CPU_LOONGSON3, CPU_CAVIUM_OCTEON, CPU_CAVIUM_OCTEON_PLUS,
        CPU_CAVIUM_OCTEON2, CPU_CAVIUM_OCTEON3, CPU_XLR, CPU_XLP,
 
+       CPU_QEMU_GENERIC,
+
        CPU_LAST
 };
 
@@ -329,11 +332,14 @@ enum cpu_type_enum {
 #define MIPS_CPU_ISA_M32R2     0x00000020
 #define MIPS_CPU_ISA_M64R1     0x00000040
 #define MIPS_CPU_ISA_M64R2     0x00000080
+#define MIPS_CPU_ISA_M32R6     0x00000100
+#define MIPS_CPU_ISA_M64R6     0x00000200
 
 #define MIPS_CPU_ISA_32BIT (MIPS_CPU_ISA_II | MIPS_CPU_ISA_M32R1 | \
-       MIPS_CPU_ISA_M32R2)
+       MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M32R6)
 #define MIPS_CPU_ISA_64BIT (MIPS_CPU_ISA_III | MIPS_CPU_ISA_IV | \
-       MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)
+       MIPS_CPU_ISA_V | MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2 | \
+       MIPS_CPU_ISA_M64R6)
 
 /*
  * CPU Option encodings
@@ -370,6 +376,7 @@ enum cpu_type_enum {
 #define MIPS_CPU_RIXIEX                0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
 #define MIPS_CPU_MAAR          0x400000000ull /* MAAR(I) registers are present */
 #define MIPS_CPU_FRE           0x800000000ull /* FRE & UFE bits implemented */
+#define MIPS_CPU_RW_LLB                0x1000000000ull /* LLADDR/LLB writes are allowed */
 
 /*
  * CPU ASE encodings
index ae6fedcb0060f22c69091480f55228dd2ef4383d..94105d3f58f4882849643cfcb8668857198f2117 100644 (file)
@@ -26,8 +26,8 @@ static inline void atomic_scrub(void *va, u32 size)
                "       sc      %0, %1                                  \n"
                "       beqz    %0, 1b                                  \n"
                "       .set    mips0                                   \n"
-               : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr)
-               : GCC_OFF12_ASM() (*virt_addr));
+               : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*virt_addr)
+               : GCC_OFF_SMALL_ASM() (*virt_addr));
 
                virt_addr++;
        }
index eb4d95de619c5dca7543ed551267e43799f4ca11..535f196ffe02da7ad769ab0b7053b4dd5253dd82 100644 (file)
@@ -417,13 +417,15 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
 struct arch_elf_state {
        int fp_abi;
        int interp_fp_abi;
-       int overall_abi;
+       int overall_fp_mode;
 };
 
+#define MIPS_ABI_FP_UNKNOWN    (-1)    /* Unknown FP ABI (kernel internal) */
+
 #define INIT_ARCH_ELF_STATE {                  \
-       .fp_abi = -1,                           \
-       .interp_fp_abi = -1,                    \
-       .overall_abi = -1,                      \
+       .fp_abi = MIPS_ABI_FP_UNKNOWN,          \
+       .interp_fp_abi = MIPS_ABI_FP_UNKNOWN,   \
+       .overall_fp_mode = -1,                  \
 }
 
 extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
index affebb78f5d6573dbf97f62630ad1e6a35026602..dd083e999b08a14ffdbef46d5f5f4a0731e9f18e 100644 (file)
@@ -68,7 +68,8 @@ static inline int __enable_fpu(enum fpu_mode mode)
                goto fr_common;
 
        case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
+#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6) \
+      || defined(CONFIG_64BIT))
                /* we only have a 32-bit FPU */
                return SIGFPE;
 #endif
index ef9987a61d88c62e79e38e3c406a3bbcc42e1a23..1de190bdfb9c9fef90b8976503242e09b97e7f60 100644 (file)
                "       "__UA_ADDR "\t2b, 4b                    \n"     \
                "       .previous                               \n"     \
                : "=r" (ret), "=&r" (oldval),                           \
-                 "=" GCC_OFF12_ASM() (*uaddr)                          \
-               : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg),      \
+                 "=" GCC_OFF_SMALL_ASM() (*uaddr)                              \
+               : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg),  \
                  "i" (-EFAULT)                                         \
                : "memory");                                            \
        } else if (cpu_has_llsc) {                                      \
                __asm__ __volatile__(                                   \
                "       .set    push                            \n"     \
                "       .set    noat                            \n"     \
-               "       .set    arch=r4000                      \n"     \
+               "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"     \
                "1:     "user_ll("%1", "%4")" # __futex_atomic_op\n"    \
                "       .set    mips0                           \n"     \
                "       " insn  "                               \n"     \
-               "       .set    arch=r4000                      \n"     \
+               "       .set    "MIPS_ISA_ARCH_LEVEL"           \n"     \
                "2:     "user_sc("$1", "%2")"                   \n"     \
                "       beqz    $1, 1b                          \n"     \
                __WEAK_LLSC_MB                                          \
@@ -74,8 +74,8 @@
                "       "__UA_ADDR "\t2b, 4b                    \n"     \
                "       .previous                               \n"     \
                : "=r" (ret), "=&r" (oldval),                           \
-                 "=" GCC_OFF12_ASM() (*uaddr)                          \
-               : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg),      \
+                 "=" GCC_OFF_SMALL_ASM() (*uaddr)                              \
+               : "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg),  \
                  "i" (-EFAULT)                                         \
                : "memory");                                            \
        } else                                                          \
@@ -174,8 +174,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                "       "__UA_ADDR "\t1b, 4b                            \n"
                "       "__UA_ADDR "\t2b, 4b                            \n"
                "       .previous                                       \n"
-               : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
-               : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+               : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+               : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
                  "i" (-EFAULT)
                : "memory");
        } else if (cpu_has_llsc) {
@@ -183,12 +183,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                "# futex_atomic_cmpxchg_inatomic                        \n"
                "       .set    push                                    \n"
                "       .set    noat                                    \n"
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
                "1:     "user_ll("%1", "%3")"                           \n"
                "       bne     %1, %z4, 3f                             \n"
                "       .set    mips0                                   \n"
                "       move    $1, %z5                                 \n"
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
                "2:     "user_sc("$1", "%2")"                           \n"
                "       beqz    $1, 1b                                  \n"
                __WEAK_LLSC_MB
@@ -203,8 +203,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                "       "__UA_ADDR "\t1b, 4b                            \n"
                "       "__UA_ADDR "\t2b, 4b                            \n"
                "       .previous                                       \n"
-               : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
-               : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
+               : "+r" (ret), "=&r" (val), "=" GCC_OFF_SMALL_ASM() (*uaddr)
+               : GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
                  "i" (-EFAULT)
                : "memory");
        } else
index 4be1a57cdbb055915c862cff1fb55966b8214726..71a986e9b694d672823a87c17260f8fd39882e24 100644 (file)
@@ -25,8 +25,6 @@ struct gio_driver {
 
        int  (*probe)(struct gio_device *, const struct gio_device_id *);
        void (*remove)(struct gio_device *);
-       int  (*suspend)(struct gio_device *, pm_message_t);
-       int  (*resume)(struct gio_device *);
        void (*shutdown)(struct gio_device *);
 
        struct device_driver driver;
index e3ee92d4dbe750c7aa05a5488f7443cdd64fb387..4087b47ad1cbea16050e968a4daf9f0531b4aa6d 100644 (file)
@@ -11,6 +11,7 @@
 #define _ASM_HAZARDS_H
 
 #include <linux/stringify.h>
+#include <asm/compiler.h>
 
 #define ___ssnop                                                       \
        sll     $0, $0, 1
@@ -21,7 +22,7 @@
 /*
  * TLB hazards
  */
-#if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
+#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
 
 /*
  * MIPSR2 defines ehb for hazard avoidance
@@ -58,7 +59,7 @@ do {                                                                  \
        unsigned long tmp;                                              \
                                                                        \
        __asm__ __volatile__(                                           \
-       "       .set    mips64r2                                \n"     \
+       "       .set "MIPS_ISA_LEVEL"                           \n"     \
        "       dla     %0, 1f                                  \n"     \
        "       jr.hb   %0                                      \n"     \
        "       .set    mips0                                   \n"     \
@@ -132,7 +133,7 @@ do {                                                                        \
 
 #define instruction_hazard()                                           \
 do {                                                                   \
-       if (cpu_has_mips_r2)                                            \
+       if (cpu_has_mips_r2_r6)                                         \
                __instruction_hazard();                                 \
 } while (0)
 
@@ -240,7 +241,7 @@ do {                                                                        \
 
 #define __disable_fpu_hazard
 
-#elif defined(CONFIG_CPU_MIPSR2)
+#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 
 #define __enable_fpu_hazard                                            \
        ___ehb
index 0fa5fdcd1f01f273da67b1530aa67ff5ee1646c6..d60cc68fa31e4f908685dd9c9f332e8df3d36242 100644 (file)
 
 #include <linux/compiler.h>
 #include <linux/stringify.h>
+#include <asm/compiler.h>
 #include <asm/hazards.h>
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 
 static inline void arch_local_irq_disable(void)
 {
@@ -118,7 +119,7 @@ void arch_local_irq_disable(void);
 unsigned long arch_local_irq_save(void);
 void arch_local_irq_restore(unsigned long flags);
 void __arch_local_irq_restore(unsigned long flags);
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 
 static inline void arch_local_irq_enable(void)
 {
@@ -126,7 +127,7 @@ static inline void arch_local_irq_enable(void)
        "       .set    push                                            \n"
        "       .set    reorder                                         \n"
        "       .set    noat                                            \n"
-#if   defined(CONFIG_CPU_MIPSR2)
+#if   defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        "       ei                                                      \n"
 #else
        "       mfc0    $1,$12                                          \n"
index 46dfc3c1fd49777a41b3158c77b1fc5c49955087..8feaed62a2abab216da39e8dd99786f2340d3f60 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/bitops.h>
 #include <linux/atomic.h>
 #include <asm/cmpxchg.h>
+#include <asm/compiler.h>
 #include <asm/war.h>
 
 typedef struct
@@ -47,7 +48,7 @@ static __inline__ long local_add_return(long i, local_t * l)
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
                "1:"    __LL    "%1, %2         # local_add_return      \n"
                "       addu    %0, %1, %3                              \n"
                        __SC    "%0, %2                                 \n"
@@ -92,7 +93,7 @@ static __inline__ long local_sub_return(long i, local_t * l)
                unsigned long temp;
 
                __asm__ __volatile__(
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
                "1:"    __LL    "%1, %2         # local_sub_return      \n"
                "       subu    %0, %1, %3                              \n"
                        __SC    "%0, %2                                 \n"
index 1668ee57acb90b82e679b50b4d687940d0a9f39f..cf92fe7339952b43f0a8585bc9a51d351fca0f50 100644 (file)
@@ -8,11 +8,10 @@
 #ifndef __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
 #define __ASM_MACH_CAVIUM_OCTEON_KERNEL_ENTRY_H
 
-
-#define CP0_CYCLE_COUNTER $9, 6
 #define CP0_CVMCTL_REG $9, 7
 #define CP0_CVMMEMCTL_REG $11,7
 #define CP0_PRID_REG $15, 0
+#define CP0_DCACHE_ERR_REG $27, 1
 #define CP0_PRID_OCTEON_PASS1 0x000d0000
 #define CP0_PRID_OCTEON_CN30XX 0x000d0200
 
        # Needed for octeon specific memcpy
        or  v0, v0, 0x5001
        xor v0, v0, 0x1001
-       # Read the processor ID register
-       mfc0 v1, CP0_PRID_REG
-       # Disable instruction prefetching (Octeon Pass1 errata)
-       or  v0, v0, 0x2000
-       # Skip reenable of prefetching for Octeon Pass1
-       beq v1, CP0_PRID_OCTEON_PASS1, skip
-       nop
-       # Reenable instruction prefetching, not on Pass1
-       xor v0, v0, 0x2000
-       # Strip off pass number off of processor id
-       srl v1, 8
-       sll v1, 8
-       # CN30XX needs some extra stuff turned off for better performance
-       bne v1, CP0_PRID_OCTEON_CN30XX, skip
-       nop
-       # CN30XX Use random Icache replacement
-       or  v0, v0, 0x400
-       # CN30XX Disable instruction prefetching
-       or  v0, v0, 0x2000
-skip:
        # First clear off CvmCtl[IPPCI] bit and move the performance
        # counters interrupt to IRQ 6
-       li      v1, ~(7 << 7)
+       dli     v1, ~(7 << 7)
        and     v0, v0, v1
        ori     v0, v0, (6 << 7)
+
+       mfc0    v1, CP0_PRID_REG
+       and     t1, v1, 0xfff8
+       xor     t1, t1, 0x9000          # 63-P1
+       beqz    t1, 4f
+       and     t1, v1, 0xfff8
+       xor     t1, t1, 0x9008          # 63-P2
+       beqz    t1, 4f
+       and     t1, v1, 0xfff8
+       xor     t1, t1, 0x9100          # 68-P1
+       beqz    t1, 4f
+       and     t1, v1, 0xff00
+       xor     t1, t1, 0x9200          # 66-PX
+       bnez    t1, 5f                  # Skip WAR for others.
+       and     t1, v1, 0x00ff
+       slti    t1, t1, 2               # 66-P1.2 and later good.
+       beqz    t1, 5f
+
+4:     # core-16057 work around
+       or      v0, v0, 0x2000          # Set IPREF bit.
+
+5:     # No core-16057 work around
        # Write the cavium control register
        dmtc0   v0, CP0_CVMCTL_REG
        sync
        # Flush dcache after config change
        cache   9, 0($0)
+       # Zero all of CVMSEG to make sure parity is correct
+       dli     v0, CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
+       dsll    v0, 7
+       beqz    v0, 2f
+1:     dsubu   v0, 8
+       sd      $0, -32768(v0)
+       bnez    v0, 1b
+2:
+       mfc0    v0, CP0_PRID_REG
+       bbit0   v0, 15, 1f
+       # OCTEON II or better have bit 15 set.  Clear the error bits.
+       and     t1, v0, 0xff00
+       dli     v0, 0x9500
+       bge     t1, v0, 1f  # OCTEON III has no DCACHE_ERR_REG COP0
+       dli     v0, 0x27
+       dmtc0   v0, CP0_DCACHE_ERR_REG
+1:
        # Get my core id
        rdhwr   v0, $0
        # Jump the master to kernel_entry
index eb72b35cf04b5bf4f45dc6668b763b05fdce0bdf..35c80be92207beef97ebc536b31ae93d4f0a133a 100644 (file)
@@ -22,4 +22,7 @@
 #define R10000_LLSC_WAR                        0
 #define MIPS34K_MISSED_ITLB_WAR                0
 
+#define CAVIUM_OCTEON_DCACHE_PREFETCH_WAR      \
+       OCTEON_IS_MODEL(OCTEON_CN6XXX)
+
 #endif /* __ASM_MIPS_MACH_CAVIUM_OCTEON_WAR_H */
index 2e54b4bff5cf59e744b9cb3a83e44bca747a9136..90dbe43c8d272d2cc5a95d1e4a2fbf20a4f1d4a6 100644 (file)
@@ -85,8 +85,8 @@ static inline void set_value_reg32(volatile u32 *const addr,
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
-       : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-       : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr));
+       : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+       : "ir" (~mask), "ir" (value), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -106,8 +106,8 @@ static inline void set_reg32(volatile u32 *const addr,
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
-       : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-       : "ir" (mask), GCC_OFF12_ASM() (*addr));
+       : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+       : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -127,8 +127,8 @@ static inline void clear_reg32(volatile u32 *const addr,
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
-       : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-       : "ir" (~mask), GCC_OFF12_ASM() (*addr));
+       : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+       : "ir" (~mask), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -148,8 +148,8 @@ static inline void toggle_reg32(volatile u32 *const addr,
        "       "__beqz"%0, 1b                          \n"
        "       nop                                     \n"
        "       .set    pop                             \n"
-       : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
-       : "ir" (mask), GCC_OFF12_ASM() (*addr));
+       : "=&r" (temp), "=" GCC_OFF_SMALL_ASM() (*addr)
+       : "ir" (mask), GCC_OFF_SMALL_ASM() (*addr));
 }
 
 /*
@@ -220,8 +220,8 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
        "       .set    arch=r4000                      \n"     \
        "1:     ll      %0, %1  #custom_read_reg32      \n"     \
        "       .set    pop                             \n"     \
-       : "=r" (tmp), "=" GCC_OFF12_ASM() (*address)            \
-       : GCC_OFF12_ASM() (*address))
+       : "=r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)                \
+       : GCC_OFF_SMALL_ASM() (*address))
 
 #define custom_write_reg32(address, tmp)                       \
        __asm__ __volatile__(                                   \
@@ -231,7 +231,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
        "       "__beqz"%0, 1b                          \n"     \
        "       nop                                     \n"     \
        "       .set    pop                             \n"     \
-       : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address)           \
-       : "0" (tmp), GCC_OFF12_ASM() (*address))
+       : "=&r" (tmp), "=" GCC_OFF_SMALL_ASM() (*address)               \
+       : "0" (tmp), GCC_OFF_SMALL_ASM() (*address))
 
 #endif /* __ASM_REGOPS_H__ */
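
Throughout these register helpers the GCC_OFF12_ASM() constraint wrapper is renamed to GCC_OFF_SMALL_ASM(), since the offset range usable by inline-asm memory operands is no longer always 12 bits once microMIPS and MIPS R6 are in the picture. A minimal sketch of the kind of definition assumed in <asm/compiler.h>; the real header is authoritative and its conditions may differ:

    /* Sketch only: pick an inline-asm memory constraint whose offset range
     * matches what the target ISA's ll/sc/cache instructions can encode. */
    #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPSR6)
    # define GCC_OFF_SMALL_ASM() "ZC"       /* short-offset constraint */
    #else
    # define GCC_OFF_SMALL_ASM() "R"        /* classic 16-bit offset  */
    #endif
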
diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h
new file mode 100644 (file)
index 0000000..60570f2
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ */
+
+#ifndef __ASM_MIPS_R2_TO_R6_EMUL_H
+#define __ASM_MIPS_R2_TO_R6_EMUL_H
+
+struct mips_r2_emulator_stats {
+       u64 movs;
+       u64 hilo;
+       u64 muls;
+       u64 divs;
+       u64 dsps;
+       u64 bops;
+       u64 traps;
+       u64 fpus;
+       u64 loads;
+       u64 stores;
+       u64 llsc;
+       u64 dsemul;
+};
+
+struct mips_r2br_emulator_stats {
+       u64 jrs;
+       u64 bltzl;
+       u64 bgezl;
+       u64 bltzll;
+       u64 bgezll;
+       u64 bltzall;
+       u64 bgezall;
+       u64 bltzal;
+       u64 bgezal;
+       u64 beql;
+       u64 bnel;
+       u64 blezl;
+       u64 bgtzl;
+};
+
+#ifdef CONFIG_DEBUG_FS
+
+#define MIPS_R2_STATS(M)                                               \
+do {                                                                   \
+       u32 nir;                                                        \
+       int err;                                                        \
+                                                                       \
+       preempt_disable();                                              \
+       __this_cpu_inc(mipsr2emustats.M);                               \
+       err = __get_user(nir, (u32 __user *)regs->cp0_epc);             \
+       if (!err) {                                                     \
+               if (nir == BREAK_MATH)                                  \
+                       __this_cpu_inc(mipsr2bdemustats.M);             \
+       }                                                               \
+       preempt_enable();                                               \
+} while (0)
+
+#define MIPS_R2BR_STATS(M)                                     \
+do {                                                           \
+       preempt_disable();                                      \
+       __this_cpu_inc(mipsr2bremustats.M);                     \
+       preempt_enable();                                       \
+} while (0)
+
+#else
+
+#define MIPS_R2_STATS(M)          do { } while (0)
+#define MIPS_R2BR_STATS(M)        do { } while (0)
+
+#endif /* CONFIG_DEBUG_FS */
+
+struct r2_decoder_table {
+       u32     mask;
+       u32     code;
+       int     (*func)(struct pt_regs *regs, u32 inst);
+};
+
+
+extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+                         const char *str);
+
+#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
+static int mipsr2_emulation;
+static __maybe_unused int mipsr2_decoder(struct pt_regs *regs, u32 inst) { return 0; };
+#else
+/* MIPS R2 Emulator ON/OFF */
+extern int mipsr2_emulation;
+extern int mipsr2_decoder(struct pt_regs *regs, u32 inst);
+#endif /* CONFIG_MIPSR2_TO_R6_EMULATOR */
+
+#define NO_R6EMU       (cpu_has_mips_r6 && !mipsr2_emulation)
+
+#endif /* __ASM_MIPS_R2_TO_R6_EMUL_H */
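
A hypothetical call site tying the pieces above together. The function name and return convention below are illustrative only, but the statistics macro does require a struct pt_regs *regs in scope, and NO_R6EMU is how R6-only kernels skip the emulation paths when the emulator is disabled or compiled out (in which case mipsr2_emulation is a constant 0 and the compiler can drop them entirely).

    /* Illustrative only: emulate one removed R2 instruction. */
    static int emulate_mult(struct pt_regs *regs, u32 ir)   /* hypothetical */
    {
            if (NO_R6EMU)           /* R6 core with the emulator switched off */
                    return -1;      /* let the reserved-instruction path run */

            /* ... decode ir and emulate the legacy multiply using regs ... */
            MIPS_R2_STATS(muls);
            return 0;
    }
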
index 5b720d8c2745b2e8b891f38c5256db82a1232bc5..fef004434096596ebb54ecf8834dc04e9f3fd4c4 100644 (file)
 #define MIPS_CONF5_NF          (_ULCAST_(1) << 0)
 #define MIPS_CONF5_UFR         (_ULCAST_(1) << 2)
 #define MIPS_CONF5_MRP         (_ULCAST_(1) << 3)
+#define MIPS_CONF5_LLB         (_ULCAST_(1) << 4)
 #define MIPS_CONF5_MVH         (_ULCAST_(1) << 5)
 #define MIPS_CONF5_FRE         (_ULCAST_(1) << 8)
 #define MIPS_CONF5_UFE         (_ULCAST_(1) << 9)
@@ -1127,6 +1128,8 @@ do {                                                                      \
 #define write_c0_config6(val)  __write_32bit_c0_register($16, 6, val)
 #define write_c0_config7(val)  __write_32bit_c0_register($16, 7, val)
 
+#define read_c0_lladdr()       __read_ulong_c0_register($17, 0)
+#define write_c0_lladdr(val)   __write_ulong_c0_register($17, 0, val)
 #define read_c0_maar()         __read_ulong_c0_register($17, 1)
 #define write_c0_maar(val)     __write_ulong_c0_register($17, 1, val)
 #define read_c0_maari()                __read_32bit_c0_register($17, 2)
@@ -1909,6 +1912,7 @@ __BUILD_SET_C0(config5)
 __BUILD_SET_C0(intcontrol)
 __BUILD_SET_C0(intctl)
 __BUILD_SET_C0(srsmap)
+__BUILD_SET_C0(pagegrain)
 __BUILD_SET_C0(brcm_config_0)
 __BUILD_SET_C0(brcm_bus_pll)
 __BUILD_SET_C0(brcm_reset)
index c436138945a84dca9f1f311e54a801090c396853..1afa1f986df8c42a06e5f34355de8f8f1325c4bf 100644 (file)
@@ -1,9 +1,12 @@
 #ifndef __ASM_MMU_H
 #define __ASM_MMU_H
 
+#include <linux/atomic.h>
+
 typedef struct {
        unsigned long asid[NR_CPUS];
        void *vdso;
+       atomic_t fp_mode_switching;
 } mm_context_t;
 
 #endif /* __ASM_MMU_H */
index 2f82568a3ee4cf2caa9e55f3e4b1d2e25eb26090..45914b59824c11a14a9ec76e6fe016dccc3eaaaf 100644 (file)
@@ -25,7 +25,6 @@ do {                                                                  \
        if (cpu_has_htw) {                                              \
                write_c0_pwbase(pgd);                                   \
                back_to_back_c0_hazard();                               \
-               htw_reset();                                            \
        }                                                               \
 } while (0)
 
@@ -132,6 +131,8 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
        for_each_possible_cpu(i)
                cpu_context(i, mm) = 0;
 
+       atomic_set(&mm->context.fp_mode_switching, 0);
+
        return 0;
 }
 
@@ -142,6 +143,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        unsigned long flags;
        local_irq_save(flags);
 
+       htw_stop();
        /* Check if our ASID is of an older version and thus invalid */
        if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
                get_new_mmu_context(next, cpu);
@@ -154,6 +156,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
         */
        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        cpumask_set_cpu(cpu, mm_cpumask(next));
+       htw_start();
 
        local_irq_restore(flags);
 }
@@ -180,6 +183,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
 
        local_irq_save(flags);
 
+       htw_stop();
        /* Unconditionally get a new ASID.  */
        get_new_mmu_context(next, cpu);
 
@@ -189,6 +193,7 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
        /* mark mmu ownership change */
        cpumask_clear_cpu(cpu, mm_cpumask(prev));
        cpumask_set_cpu(cpu, mm_cpumask(next));
+       htw_start();
 
        local_irq_restore(flags);
 }
@@ -203,6 +208,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
        unsigned long flags;
 
        local_irq_save(flags);
+       htw_stop();
 
        if (cpumask_test_cpu(cpu, mm_cpumask(mm)))  {
                get_new_mmu_context(mm, cpu);
@@ -211,6 +217,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
                /* will get a new context next time */
                cpu_context(cpu, mm) = 0;
        }
+       htw_start();
        local_irq_restore(flags);
 }
 
index 800fe578dc99a5312aa78445a8420dc9c466b811..0aaf9a01ea505bad4754b6d56f7eda8f19b9de6f 100644 (file)
@@ -88,10 +88,14 @@ search_module_dbetables(unsigned long addr)
 #define MODULE_PROC_FAMILY "MIPS32_R1 "
 #elif defined CONFIG_CPU_MIPS32_R2
 #define MODULE_PROC_FAMILY "MIPS32_R2 "
+#elif defined CONFIG_CPU_MIPS32_R6
+#define MODULE_PROC_FAMILY "MIPS32_R6 "
 #elif defined CONFIG_CPU_MIPS64_R1
 #define MODULE_PROC_FAMILY "MIPS64_R1 "
 #elif defined CONFIG_CPU_MIPS64_R2
 #define MODULE_PROC_FAMILY "MIPS64_R2 "
+#elif defined CONFIG_CPU_MIPS64_R6
+#define MODULE_PROC_FAMILY "MIPS64_R6 "
 #elif defined CONFIG_CPU_R3000
 #define MODULE_PROC_FAMILY "R3000 "
 #elif defined CONFIG_CPU_TX39XX
index 75739c83f07e74bb26ab5dbc0fd32c34401c838b..8d05d90698238e4deb6bc0b649a7929e3d0e2b76 100644 (file)
@@ -275,7 +275,7 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
                " lbu   %[ticket], %[now_serving]\n"
                "4:\n"
                ".set pop\n" :
-               [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
+               [ticket_ptr] "=" GCC_OFF_SMALL_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
                [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
                [my_ticket] "=r"(my_ticket)
            );
diff --git a/arch/mips/include/asm/octeon/cvmx-rst-defs.h b/arch/mips/include/asm/octeon/cvmx-rst-defs.h
new file mode 100644 (file)
index 0000000..0c9c3e7
--- /dev/null
@@ -0,0 +1,306 @@
+/***********************license start***************
+ * Author: Cavium Inc.
+ *
+ * Contact: support@cavium.com
+ * This file is part of the OCTEON SDK
+ *
+ * Copyright (c) 2003-2014 Cavium Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT.  See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this file; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * or visit http://www.gnu.org/licenses/.
+ *
+ * This file may also be available under a different license from Cavium.
+ * Contact Cavium Inc. for more information
+ ***********************license end**************************************/
+
+#ifndef __CVMX_RST_DEFS_H__
+#define __CVMX_RST_DEFS_H__
+
+#define CVMX_RST_BOOT (CVMX_ADD_IO_SEG(0x0001180006001600ull))
+#define CVMX_RST_CFG (CVMX_ADD_IO_SEG(0x0001180006001610ull))
+#define CVMX_RST_CKILL (CVMX_ADD_IO_SEG(0x0001180006001638ull))
+#define CVMX_RST_CTLX(offset) (CVMX_ADD_IO_SEG(0x0001180006001640ull) + ((offset) & 3) * 8)
+#define CVMX_RST_DELAY (CVMX_ADD_IO_SEG(0x0001180006001608ull))
+#define CVMX_RST_ECO (CVMX_ADD_IO_SEG(0x00011800060017B8ull))
+#define CVMX_RST_INT (CVMX_ADD_IO_SEG(0x0001180006001628ull))
+#define CVMX_RST_OCX (CVMX_ADD_IO_SEG(0x0001180006001618ull))
+#define CVMX_RST_POWER_DBG (CVMX_ADD_IO_SEG(0x0001180006001708ull))
+#define CVMX_RST_PP_POWER (CVMX_ADD_IO_SEG(0x0001180006001700ull))
+#define CVMX_RST_SOFT_PRSTX(offset) (CVMX_ADD_IO_SEG(0x00011800060016C0ull) + ((offset) & 3) * 8)
+#define CVMX_RST_SOFT_RST (CVMX_ADD_IO_SEG(0x0001180006001680ull))
+
+union cvmx_rst_boot {
+       uint64_t u64;
+       struct cvmx_rst_boot_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t chipkill:1;
+               uint64_t jtcsrdis:1;
+               uint64_t ejtagdis:1;
+               uint64_t romen:1;
+               uint64_t ckill_ppdis:1;
+               uint64_t jt_tstmode:1;
+               uint64_t vrm_err:1;
+               uint64_t reserved_37_56:20;
+               uint64_t c_mul:7;
+               uint64_t pnr_mul:6;
+               uint64_t reserved_21_23:3;
+               uint64_t lboot_oci:3;
+               uint64_t lboot_ext:6;
+               uint64_t lboot:10;
+               uint64_t rboot:1;
+               uint64_t rboot_pin:1;
+#else
+               uint64_t rboot_pin:1;
+               uint64_t rboot:1;
+               uint64_t lboot:10;
+               uint64_t lboot_ext:6;
+               uint64_t lboot_oci:3;
+               uint64_t reserved_21_23:3;
+               uint64_t pnr_mul:6;
+               uint64_t c_mul:7;
+               uint64_t reserved_37_56:20;
+               uint64_t vrm_err:1;
+               uint64_t jt_tstmode:1;
+               uint64_t ckill_ppdis:1;
+               uint64_t romen:1;
+               uint64_t ejtagdis:1;
+               uint64_t jtcsrdis:1;
+               uint64_t chipkill:1;
+#endif
+       } s;
+       struct cvmx_rst_boot_s cn70xx;
+       struct cvmx_rst_boot_s cn70xxp1;
+       struct cvmx_rst_boot_s cn78xx;
+};
+
+union cvmx_rst_cfg {
+       uint64_t u64;
+       struct cvmx_rst_cfg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t bist_delay:58;
+               uint64_t reserved_3_5:3;
+               uint64_t cntl_clr_bist:1;
+               uint64_t warm_clr_bist:1;
+               uint64_t soft_clr_bist:1;
+#else
+               uint64_t soft_clr_bist:1;
+               uint64_t warm_clr_bist:1;
+               uint64_t cntl_clr_bist:1;
+               uint64_t reserved_3_5:3;
+               uint64_t bist_delay:58;
+#endif
+       } s;
+       struct cvmx_rst_cfg_s cn70xx;
+       struct cvmx_rst_cfg_s cn70xxp1;
+       struct cvmx_rst_cfg_s cn78xx;
+};
+
+union cvmx_rst_ckill {
+       uint64_t u64;
+       struct cvmx_rst_ckill_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_47_63:17;
+               uint64_t timer:47;
+#else
+               uint64_t timer:47;
+               uint64_t reserved_47_63:17;
+#endif
+       } s;
+       struct cvmx_rst_ckill_s cn70xx;
+       struct cvmx_rst_ckill_s cn70xxp1;
+       struct cvmx_rst_ckill_s cn78xx;
+};
+
+union cvmx_rst_ctlx {
+       uint64_t u64;
+       struct cvmx_rst_ctlx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_10_63:54;
+               uint64_t prst_link:1;
+               uint64_t rst_done:1;
+               uint64_t rst_link:1;
+               uint64_t host_mode:1;
+               uint64_t reserved_4_5:2;
+               uint64_t rst_drv:1;
+               uint64_t rst_rcv:1;
+               uint64_t rst_chip:1;
+               uint64_t rst_val:1;
+#else
+               uint64_t rst_val:1;
+               uint64_t rst_chip:1;
+               uint64_t rst_rcv:1;
+               uint64_t rst_drv:1;
+               uint64_t reserved_4_5:2;
+               uint64_t host_mode:1;
+               uint64_t rst_link:1;
+               uint64_t rst_done:1;
+               uint64_t prst_link:1;
+               uint64_t reserved_10_63:54;
+#endif
+       } s;
+       struct cvmx_rst_ctlx_s cn70xx;
+       struct cvmx_rst_ctlx_s cn70xxp1;
+       struct cvmx_rst_ctlx_s cn78xx;
+};
+
+union cvmx_rst_delay {
+       uint64_t u64;
+       struct cvmx_rst_delay_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_32_63:32;
+               uint64_t warm_rst_dly:16;
+               uint64_t soft_rst_dly:16;
+#else
+               uint64_t soft_rst_dly:16;
+               uint64_t warm_rst_dly:16;
+               uint64_t reserved_32_63:32;
+#endif
+       } s;
+       struct cvmx_rst_delay_s cn70xx;
+       struct cvmx_rst_delay_s cn70xxp1;
+       struct cvmx_rst_delay_s cn78xx;
+};
+
+union cvmx_rst_eco {
+       uint64_t u64;
+       struct cvmx_rst_eco_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_32_63:32;
+               uint64_t eco_rw:32;
+#else
+               uint64_t eco_rw:32;
+               uint64_t reserved_32_63:32;
+#endif
+       } s;
+       struct cvmx_rst_eco_s cn78xx;
+};
+
+union cvmx_rst_int {
+       uint64_t u64;
+       struct cvmx_rst_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_12_63:52;
+               uint64_t perst:4;
+               uint64_t reserved_4_7:4;
+               uint64_t rst_link:4;
+#else
+               uint64_t rst_link:4;
+               uint64_t reserved_4_7:4;
+               uint64_t perst:4;
+               uint64_t reserved_12_63:52;
+#endif
+       } s;
+       struct cvmx_rst_int_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_11_63:53;
+               uint64_t perst:3;
+               uint64_t reserved_3_7:5;
+               uint64_t rst_link:3;
+#else
+               uint64_t rst_link:3;
+               uint64_t reserved_3_7:5;
+               uint64_t perst:3;
+               uint64_t reserved_11_63:53;
+#endif
+       } cn70xx;
+       struct cvmx_rst_int_cn70xx cn70xxp1;
+       struct cvmx_rst_int_s cn78xx;
+};
+
+union cvmx_rst_ocx {
+       uint64_t u64;
+       struct cvmx_rst_ocx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_3_63:61;
+               uint64_t rst_link:3;
+#else
+               uint64_t rst_link:3;
+               uint64_t reserved_3_63:61;
+#endif
+       } s;
+       struct cvmx_rst_ocx_s cn78xx;
+};
+
+union cvmx_rst_power_dbg {
+       uint64_t u64;
+       struct cvmx_rst_power_dbg_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_3_63:61;
+               uint64_t str:3;
+#else
+               uint64_t str:3;
+               uint64_t reserved_3_63:61;
+#endif
+       } s;
+       struct cvmx_rst_power_dbg_s cn78xx;
+};
+
+union cvmx_rst_pp_power {
+       uint64_t u64;
+       struct cvmx_rst_pp_power_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_48_63:16;
+               uint64_t gate:48;
+#else
+               uint64_t gate:48;
+               uint64_t reserved_48_63:16;
+#endif
+       } s;
+       struct cvmx_rst_pp_power_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_4_63:60;
+               uint64_t gate:4;
+#else
+               uint64_t gate:4;
+               uint64_t reserved_4_63:60;
+#endif
+       } cn70xx;
+       struct cvmx_rst_pp_power_cn70xx cn70xxp1;
+       struct cvmx_rst_pp_power_s cn78xx;
+};
+
+union cvmx_rst_soft_prstx {
+       uint64_t u64;
+       struct cvmx_rst_soft_prstx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_1_63:63;
+               uint64_t soft_prst:1;
+#else
+               uint64_t soft_prst:1;
+               uint64_t reserved_1_63:63;
+#endif
+       } s;
+       struct cvmx_rst_soft_prstx_s cn70xx;
+       struct cvmx_rst_soft_prstx_s cn70xxp1;
+       struct cvmx_rst_soft_prstx_s cn78xx;
+};
+
+union cvmx_rst_soft_rst {
+       uint64_t u64;
+       struct cvmx_rst_soft_rst_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+               uint64_t reserved_1_63:63;
+               uint64_t soft_rst:1;
+#else
+               uint64_t soft_rst:1;
+               uint64_t reserved_1_63:63;
+#endif
+       } s;
+       struct cvmx_rst_soft_rst_s cn70xx;
+       struct cvmx_rst_soft_rst_s cn70xxp1;
+       struct cvmx_rst_soft_rst_s cn78xx;
+};
+
+#endif
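
These RST_* CSRs are the OCTEON III (CN70XX/CN78XX) counterparts of the older CIU-based reset registers. A hedged usage sketch follows; cvmx_read_csr() is the existing SDK accessor, while the helper name and the printed fields are made up for illustration:

    /* Illustrative only: report the core-clock multiplier and the
     * remote-boot strap from the reset-boot register. */
    static void report_rst_boot(void)
    {
            union cvmx_rst_boot rst_boot;

            rst_boot.u64 = cvmx_read_csr(CVMX_RST_BOOT);
            pr_info("octeon: c_mul=%u rboot=%u\n",
                    (unsigned int)rst_boot.s.c_mul,
                    (unsigned int)rst_boot.s.rboot);
    }
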
index e8a1c2fd52cdd8f3b65ffe68bf3eefa3628fbaa2..92b377e36dac260f9100d9178fe2252bed85e061 100644 (file)
@@ -45,6 +45,7 @@
  */
 
 #define OCTEON_FAMILY_MASK     0x00ffff00
+#define OCTEON_PRID_MASK       0x00ffffff
 
 /* Flag bits in top byte */
 /* Ignores revision in model checks */
 #define OM_MATCH_6XXX_FAMILY_MODELS    0x40000000
 /* Match all cnf7XXX Octeon models. */
 #define OM_MATCH_F7XXX_FAMILY_MODELS   0x80000000
+/* Match all cn7XXX Octeon models. */
+#define OM_MATCH_7XXX_FAMILY_MODELS     0x10000000
+#define OM_MATCH_FAMILY_MODELS         (OM_MATCH_5XXX_FAMILY_MODELS |  \
+                                        OM_MATCH_6XXX_FAMILY_MODELS |  \
+                                        OM_MATCH_F7XXX_FAMILY_MODELS | \
+                                        OM_MATCH_7XXX_FAMILY_MODELS)
+/*
+ * CN7XXX models with new revision encoding
+ */
+
+#define OCTEON_CN73XX_PASS1_0  0x000d9700
+#define OCTEON_CN73XX          (OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN73XX_PASS1_X  (OCTEON_CN73XX_PASS1_0 | \
+                                OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN70XX_PASS1_0  0x000d9600
+#define OCTEON_CN70XX_PASS1_1  0x000d9601
+#define OCTEON_CN70XX_PASS1_2  0x000d9602
+
+#define OCTEON_CN70XX_PASS2_0  0x000d9608
+
+#define OCTEON_CN70XX          (OCTEON_CN70XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN70XX_PASS1_X  (OCTEON_CN70XX_PASS1_0 | \
+                                OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN70XX_PASS2_X  (OCTEON_CN70XX_PASS2_0 | \
+                                OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN71XX          OCTEON_CN70XX
+
+#define OCTEON_CN78XX_PASS1_0  0x000d9500
+#define OCTEON_CN78XX_PASS1_1  0x000d9501
+#define OCTEON_CN78XX_PASS2_0  0x000d9508
+
+#define OCTEON_CN78XX          (OCTEON_CN78XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN78XX_PASS1_X  (OCTEON_CN78XX_PASS1_0 | \
+                                OM_IGNORE_MINOR_REVISION)
+#define OCTEON_CN78XX_PASS2_X  (OCTEON_CN78XX_PASS2_0 | \
+                                OM_IGNORE_MINOR_REVISION)
+
+#define OCTEON_CN76XX          (0x000d9540 | OM_CHECK_SUBMODEL)
 
 /*
  * CNF7XXX models with new revision encoding
  */
 #define OCTEON_CNF71XX_PASS1_0 0x000d9400
+#define OCTEON_CNF71XX_PASS1_1  0x000d9401
 
 #define OCTEON_CNF71XX         (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
 #define OCTEON_CNF71XX_PASS1_X (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN68XX_PASS1_1  0x000d9101
 #define OCTEON_CN68XX_PASS1_2  0x000d9102
 #define OCTEON_CN68XX_PASS2_0  0x000d9108
+#define OCTEON_CN68XX_PASS2_1   0x000d9109
+#define OCTEON_CN68XX_PASS2_2   0x000d910a
 
 #define OCTEON_CN68XX          (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
 #define OCTEON_CN68XX_PASS1_X  (OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN63XX_PASS1_X  (OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN63XX_PASS2_X  (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
 
+/* CN62XX is same as CN63XX with 1 MB cache */
+#define OCTEON_CN62XX           OCTEON_CN63XX
+
 #define OCTEON_CN61XX_PASS1_0  0x000d9300
+#define OCTEON_CN61XX_PASS1_1   0x000d9301
 
 #define OCTEON_CN61XX          (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
 #define OCTEON_CN61XX_PASS1_X  (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 
+/* CN60XX is same as CN61XX with 512 KB cache */
+#define OCTEON_CN60XX           OCTEON_CN61XX
+
 /*
  * CN5XXX models with new revision encoding
  */
 #define OCTEON_CN58XX_PASS2_2  0x000d030a
 #define OCTEON_CN58XX_PASS2_3  0x000d030b
 
-#define OCTEON_CN58XX          (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CN58XX          (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_REVISION)
 #define OCTEON_CN58XX_PASS1_X  (OCTEON_CN58XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN58XX_PASS2_X  (OCTEON_CN58XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
 #define OCTEON_CN58XX_PASS1    OCTEON_CN58XX_PASS1_X
 #define OCTEON_CN3XXX          (OCTEON_CN58XX_PASS1_0 | OM_MATCH_PREVIOUS_MODELS | OM_IGNORE_REVISION)
 #define OCTEON_CN5XXX          (OCTEON_CN58XX_PASS1_0 | OM_MATCH_5XXX_FAMILY_MODELS)
 #define OCTEON_CN6XXX          (OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
-
-/* These are used to cover entire families of OCTEON processors */
-#define OCTEON_FAM_1           (OCTEON_CN3XXX)
-#define OCTEON_FAM_PLUS                (OCTEON_CN5XXX)
-#define OCTEON_FAM_1_PLUS      (OCTEON_FAM_PLUS | OM_MATCH_PREVIOUS_MODELS)
-#define OCTEON_FAM_2           (OCTEON_CN6XXX)
+#define OCTEON_CNF7XXX         (OCTEON_CNF71XX_PASS1_0 | \
+                                OM_MATCH_F7XXX_FAMILY_MODELS)
+#define OCTEON_CN7XXX          (OCTEON_CN78XX_PASS1_0 | \
+                                OM_MATCH_7XXX_FAMILY_MODELS)
 
 /* The revision byte (low byte) has two different encodings.
  * CN3XXX:
  *     <4>:   alternate package
  *     <3:0>: revision
  *
- * CN5XXX:
+ * CN5XXX and older models:
  *
  *     bits
  *     <7>:   reserved (0)
 /* CN5XXX and later use different layout of bits in the revision ID field */
 #define OCTEON_58XX_FAMILY_MASK             OCTEON_38XX_FAMILY_MASK
 #define OCTEON_58XX_FAMILY_REV_MASK  0x00ffff3f
-#define OCTEON_58XX_MODEL_MASK      0x00ffffc0
+#define OCTEON_58XX_MODEL_MASK      0x00ffff40
 #define OCTEON_58XX_MODEL_REV_MASK   (OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
-#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00fffff8)
+#define OCTEON_58XX_MODEL_MINOR_REV_MASK (OCTEON_58XX_MODEL_REV_MASK & 0x00ffff38)
 #define OCTEON_5XXX_MODEL_MASK      0x00ff0fc0
 
-/* forward declarations */
 static inline uint32_t cvmx_get_proc_id(void) __attribute__ ((pure));
 static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
 
 #define __OCTEON_MATCH_MASK__(x, y, z) (((x) & (z)) == ((y) & (z)))
 
+/*
+ * __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model)
+ * returns true if chip_model is identical to, or belongs to, the OCTEON
+ * model group specified in arg_model.
+ */
 /* NOTE: This for internal use only! */
 #define __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model)             \
 ((((arg_model & OCTEON_38XX_FAMILY_MASK) < OCTEON_CN58XX_PASS1_0)  && ( \
@@ -286,11 +339,18 @@ static inline uint64_t cvmx_read_csr(uint64_t csr_addr);
                ((((arg_model) & (OM_FLAG_MASK)) == OM_IGNORE_REVISION) \
                        && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_FAMILY_MASK)) || \
                ((((arg_model) & (OM_FLAG_MASK)) == OM_CHECK_SUBMODEL)  \
-                       && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_REV_MASK)) || \
+                       && __OCTEON_MATCH_MASK__((chip_model), (arg_model), OCTEON_58XX_MODEL_MASK)) || \
                ((((arg_model) & (OM_MATCH_5XXX_FAMILY_MODELS)) == OM_MATCH_5XXX_FAMILY_MODELS) \
-                       && ((chip_model) >= OCTEON_CN58XX_PASS1_0) && ((chip_model) < OCTEON_CN63XX_PASS1_0)) || \
+                       && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN58XX_PASS1_0) \
+                       && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN63XX_PASS1_0)) || \
                ((((arg_model) & (OM_MATCH_6XXX_FAMILY_MODELS)) == OM_MATCH_6XXX_FAMILY_MODELS) \
-                       && ((chip_model) >= OCTEON_CN63XX_PASS1_0)) ||  \
+                       && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN63XX_PASS1_0) \
+                       && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CNF71XX_PASS1_0)) || \
+               ((((arg_model) & (OM_MATCH_F7XXX_FAMILY_MODELS)) == OM_MATCH_F7XXX_FAMILY_MODELS) \
+                       && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CNF71XX_PASS1_0) \
+                       && ((chip_model & OCTEON_PRID_MASK) < OCTEON_CN78XX_PASS1_0)) || \
+               ((((arg_model) & (OM_MATCH_7XXX_FAMILY_MODELS)) == OM_MATCH_7XXX_FAMILY_MODELS) \
+                       && ((chip_model & OCTEON_PRID_MASK) >= OCTEON_CN78XX_PASS1_0)) || \
                ((((arg_model) & (OM_MATCH_PREVIOUS_MODELS)) == OM_MATCH_PREVIOUS_MODELS) \
                        && (((chip_model) & OCTEON_58XX_MODEL_MASK) < ((arg_model) & OCTEON_58XX_MODEL_MASK))) \
                )))
@@ -300,14 +360,6 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
 {
        uint32_t cpuid = cvmx_get_proc_id();
 
-       /*
-        * Check for special case of mismarked 3005 samples. We only
-        * need to check if the sub model isn't being ignored
-        */
-       if ((model & OM_CHECK_SUBMODEL) == OM_CHECK_SUBMODEL) {
-               if (cpuid == OCTEON_CN3010_PASS1 && (cvmx_read_csr(0x80011800800007B8ull) & (1ull << 34)))
-                       cpuid |= 0x10;
-       }
        return __OCTEON_IS_MODEL_COMPILE__(model, cpuid);
 }
 
@@ -326,10 +378,21 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
 #define OCTEON_IS_COMMON_BINARY() 1
 #undef OCTEON_MODEL
 
+#define OCTEON_IS_OCTEON1()    OCTEON_IS_MODEL(OCTEON_CN3XXX)
+#define OCTEON_IS_OCTEONPLUS() OCTEON_IS_MODEL(OCTEON_CN5XXX)
+#define OCTEON_IS_OCTEON2()                                            \
+       (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
+
+#define OCTEON_IS_OCTEON3()    OCTEON_IS_MODEL(OCTEON_CN7XXX)
+
+#define OCTEON_IS_OCTEON1PLUS()        (OCTEON_IS_OCTEON1() || OCTEON_IS_OCTEONPLUS())
+
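
The new generation helpers let callers test an entire family at once instead of enumerating models; a short sketch of typical use (the setup_*() helpers are hypothetical):

    /* Illustrative only: per-generation setup without naming every model. */
    static void octeon_generation_setup(void)
    {
            if (OCTEON_IS_OCTEON3())                /* CN70XX/CN73XX/CN78XX */
                    setup_octeon3();
            else if (OCTEON_IS_OCTEON2())           /* CN6XXX and CNF71XX   */
                    setup_octeon2();
            else if (OCTEON_IS_OCTEON1PLUS())       /* CN3XXX and CN5XXX    */
                    setup_octeon1plus();
    }
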
 const char *__init octeon_model_get_string(uint32_t chip_id);
 
 /*
  * Return the octeon family, i.e., ProcessorID of the PrID register.
+ *
+ * @return the octeon family on success, ((uint32_t)-1) on error.
  */
 static inline uint32_t cvmx_get_octeon_family(void)
 {
index 6dfefd2d5cdfd7a39c0679da021a7338c298e490..0415965708565c7ddf8d5fe3451f67e907ce9a9c 100644 (file)
@@ -9,6 +9,7 @@
 #define __ASM_OCTEON_OCTEON_H
 
 #include <asm/octeon/cvmx.h>
+#include <asm/bitfield.h>
 
 extern uint64_t octeon_bootmem_alloc_range_phys(uint64_t size,
                                                uint64_t alignment,
@@ -53,6 +54,7 @@ extern void octeon_io_clk_delay(unsigned long);
 #define OCTOEN_SERIAL_LEN      20
 
 struct octeon_boot_descriptor {
+#ifdef __BIG_ENDIAN_BITFIELD
        /* Start of block referenced by assembly code - do not change! */
        uint32_t desc_version;
        uint32_t desc_size;
@@ -104,77 +106,149 @@ struct octeon_boot_descriptor {
        uint8_t mac_addr_base[6];
        uint8_t mac_addr_count;
        uint64_t cvmx_desc_vaddr;
+#else
+       uint32_t desc_size;
+       uint32_t desc_version;
+       uint64_t stack_top;
+       uint64_t heap_base;
+       uint64_t heap_end;
+       /* Only used by bootloader */
+       uint64_t entry_point;
+       uint64_t desc_vaddr;
+       /* End of This block referenced by assembly code - do not change! */
+       uint32_t stack_size;
+       uint32_t exception_base_addr;
+       uint32_t argc;
+       uint32_t heap_size;
+       /*
+        * Argc count for application.
+        * Warning low bit scrambled in little-endian.
+        */
+       uint32_t argv[OCTEON_ARGV_MAX_ARGS];
+
+#define  BOOT_FLAG_INIT_CORE           (1 << 0)
+#define  OCTEON_BL_FLAG_DEBUG          (1 << 1)
+#define  OCTEON_BL_FLAG_NO_MAGIC       (1 << 2)
+       /* If set, use uart1 for console */
+#define  OCTEON_BL_FLAG_CONSOLE_UART1  (1 << 3)
+       /* If set, use PCI console */
+#define  OCTEON_BL_FLAG_CONSOLE_PCI    (1 << 4)
+       /* Call exit on break on serial port */
+#define  OCTEON_BL_FLAG_BREAK          (1 << 5)
+
+       uint32_t core_mask;
+       uint32_t flags;
+       /* physical address of free memory descriptor block. */
+       uint32_t phy_mem_desc_addr;
+       /* DRAM size in megabytes. */
+       uint32_t dram_size;
+       /* CPU clock speed, in hz. */
+       uint32_t eclock_hz;
+       /* used to pass flags from app to debugger. */
+       uint32_t debugger_flags_base_addr;
+       /* SPI4 clock in hz. */
+       uint32_t spi_clock_hz;
+       /* DRAM clock speed, in hz. */
+       uint32_t dclock_hz;
+       uint8_t chip_rev_minor;
+       uint8_t chip_rev_major;
+       uint16_t chip_type;
+       uint8_t board_rev_minor;
+       uint8_t board_rev_major;
+       uint16_t board_type;
+
+       uint64_t unused1[4]; /* Not even filled in by bootloader. */
+
+       uint64_t cvmx_desc_vaddr;
+#endif
 };
 
 union octeon_cvmemctl {
        uint64_t u64;
        struct {
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t tlbbist:1;
+               __BITFIELD_FIELD(uint64_t tlbbist:1,
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t l1cbist:1;
+               __BITFIELD_FIELD(uint64_t l1cbist:1,
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t l1dbist:1;
+               __BITFIELD_FIELD(uint64_t l1dbist:1,
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t dcmbist:1;
+               __BITFIELD_FIELD(uint64_t dcmbist:1,
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t ptgbist:1;
+               __BITFIELD_FIELD(uint64_t ptgbist:1,
                /* RO 1 = BIST fail, 0 = BIST pass */
-               uint64_t wbfbist:1;
+               __BITFIELD_FIELD(uint64_t wbfbist:1,
                /* Reserved */
-               uint64_t reserved:22;
+               __BITFIELD_FIELD(uint64_t reserved:17,
+               /* OCTEON II - TLB replacement policy: 0 = bitmask LRU; 1 = NLU.
+                * This field selects between the TLB replacement policies:
+                * bitmask LRU or NLU. Bitmask LRU maintains a mask of
+                * recently used TLB entries and avoids them as new entries
+                * are allocated. NLU simply guarantees that the next
+                * allocation is not the last used TLB entry. */
+               __BITFIELD_FIELD(uint64_t tlbnlu:1,
+               /* OCTEON II - Selects the bit in the counter used for
+                * releasing a PAUSE. This counter trips every 2^(8+PAUSETIME)
+                * cycles. If not already released, the cnMIPS II core will
+                * always release a given PAUSE instruction within
+                * 2^(8+PAUSETIME) cycles. If the counter trip happens to line up,
+                * the cnMIPS II core may release the PAUSE instantly. */
+               __BITFIELD_FIELD(uint64_t pausetime:3,
+               /* OCTEON II - This field is an extension of
+                * CvmMemCtl[DIDTTO] */
+               __BITFIELD_FIELD(uint64_t didtto2:1,
                /* R/W If set, marked write-buffer entries time out
                 * the same as other entries; if clear, marked
                 * write-buffer entries use the maximum timeout. */
-               uint64_t dismarkwblongto:1;
+               __BITFIELD_FIELD(uint64_t dismarkwblongto:1,
                /* R/W If set, a merged store does not clear the
                 * write-buffer entry timeout state. */
-               uint64_t dismrgclrwbto:1;
+               __BITFIELD_FIELD(uint64_t dismrgclrwbto:1,
                /* R/W Two bits that are the MSBs of the resultant
                 * CVMSEG LM word location for an IOBDMA. The other 8
                 * bits come from the SCRADDR field of the IOBDMA. */
-               uint64_t iobdmascrmsb:2;
+               __BITFIELD_FIELD(uint64_t iobdmascrmsb:2,
                /* R/W If set, SYNCWS and SYNCS only order marked
                 * stores; if clear, SYNCWS and SYNCS only order
                 * unmarked stores. SYNCWSMARKED has no effect when
                 * DISSYNCWS is set. */
-               uint64_t syncwsmarked:1;
+               __BITFIELD_FIELD(uint64_t syncwsmarked:1,
                /* R/W If set, SYNCWS acts as SYNCW and SYNCS acts as
                 * SYNC. */
-               uint64_t dissyncws:1;
+               __BITFIELD_FIELD(uint64_t dissyncws:1,
                /* R/W If set, no stall happens on write buffer
                 * full. */
-               uint64_t diswbfst:1;
+               __BITFIELD_FIELD(uint64_t diswbfst:1,
                /* R/W If set (and SX set), supervisor-level
                 * loads/stores can use XKPHYS addresses with
                 * VA<48>==0 */
-               uint64_t xkmemenas:1;
+               __BITFIELD_FIELD(uint64_t xkmemenas:1,
                /* R/W If set (and UX set), user-level loads/stores
                 * can use XKPHYS addresses with VA<48>==0 */
-               uint64_t xkmemenau:1;
+               __BITFIELD_FIELD(uint64_t xkmemenau:1,
                /* R/W If set (and SX set), supervisor-level
                 * loads/stores can use XKPHYS addresses with
                 * VA<48>==1 */
-               uint64_t xkioenas:1;
+               __BITFIELD_FIELD(uint64_t xkioenas:1,
                /* R/W If set (and UX set), user-level loads/stores
                 * can use XKPHYS addresses with VA<48>==1 */
-               uint64_t xkioenau:1;
+               __BITFIELD_FIELD(uint64_t xkioenau:1,
                /* R/W If set, all stores act as SYNCW (NOMERGE must
                 * be set when this is set) RW, reset to 0. */
-               uint64_t allsyncw:1;
+               __BITFIELD_FIELD(uint64_t allsyncw:1,
                /* R/W If set, no stores merge, and all stores reach
                 * the coherent bus in order. */
-               uint64_t nomerge:1;
+               __BITFIELD_FIELD(uint64_t nomerge:1,
                /* R/W Selects the bit in the counter used for DID
                 * time-outs 0 = 2^31, 1 = 2^30, 2 = 2^29, 3 =
                 * 2^14. Actual time-out is between 1x and 2x this
                 * interval. For example, with DIDTTO=3, expiration
                 * interval is between 16K and 32K. */
-               uint64_t didtto:2;
+               __BITFIELD_FIELD(uint64_t didtto:2,
                /* R/W If set, the (mem) CSR clock never turns off. */
-               uint64_t csrckalwys:1;
+               __BITFIELD_FIELD(uint64_t csrckalwys:1,
                /* R/W If set, mclk never turns off. */
-               uint64_t mclkalwys:1;
+               __BITFIELD_FIELD(uint64_t mclkalwys:1,
                /* R/W Selects the bit in the counter used for write
                 * buffer flush time-outs (WBFLT+11) is the bit
                 * position in an internal counter used to determine
@@ -182,25 +256,26 @@ union octeon_cvmemctl {
                 * 2x this interval. For example, with WBFLT = 0, a
                 * write buffer expires between 2K and 4K cycles after
                 * the write buffer entry is allocated. */
-               uint64_t wbfltime:3;
+               __BITFIELD_FIELD(uint64_t wbfltime:3,
                /* R/W If set, do not put Istream in the L2 cache. */
-               uint64_t istrnol2:1;
+               __BITFIELD_FIELD(uint64_t istrnol2:1,
                /* R/W The write buffer threshold. */
-               uint64_t wbthresh:4;
+               __BITFIELD_FIELD(uint64_t wbthresh:4,
                /* Reserved */
-               uint64_t reserved2:2;
+               __BITFIELD_FIELD(uint64_t reserved2:2,
                /* R/W If set, CVMSEG is available for loads/stores in
                 * kernel/debug mode. */
-               uint64_t cvmsegenak:1;
+               __BITFIELD_FIELD(uint64_t cvmsegenak:1,
                /* R/W If set, CVMSEG is available for loads/stores in
                 * supervisor mode. */
-               uint64_t cvmsegenas:1;
+               __BITFIELD_FIELD(uint64_t cvmsegenas:1,
                /* R/W If set, CVMSEG is available for loads/stores in
                 * user mode. */
-               uint64_t cvmsegenau:1;
+               __BITFIELD_FIELD(uint64_t cvmsegenau:1,
                /* R/W Size of local memory in cache blocks, 54 (6912
                 * bytes) is max legal value. */
-               uint64_t lmemsz:6;
+               __BITFIELD_FIELD(uint64_t lmemsz:6,
+               ;)))))))))))))))))))))))))))))))))
        } s;
 };
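
The conversion above leans on __BITFIELD_FIELD from the newly included <asm/bitfield.h>: nesting the macro lets one field list expand in declaration order on big-endian and in reverse order on little-endian, which is also why the long run of closing parentheses appears just before "} s;". A sketch of the assumed definition; the real header is authoritative:

    #ifdef __BIG_ENDIAN_BITFIELD
    #define __BITFIELD_FIELD(field, more)   \
            field;                          \
            more
    #else   /* little-endian bitfields */
    #define __BITFIELD_FIELD(field, more)   \
            more                            \
            field;
    #endif
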
 
@@ -224,6 +299,19 @@ static inline void octeon_npi_write32(uint64_t address, uint32_t val)
        cvmx_read64_uint32(address ^ 4);
 }
 
+/* Octeon multiplier save/restore routines from octeon_switch.S */
+void octeon_mult_save(void);
+void octeon_mult_restore(void);
+void octeon_mult_save_end(void);
+void octeon_mult_restore_end(void);
+void octeon_mult_save3(void);
+void octeon_mult_save3_end(void);
+void octeon_mult_save2(void);
+void octeon_mult_save2_end(void);
+void octeon_mult_restore3(void);
+void octeon_mult_restore3_end(void);
+void octeon_mult_restore2(void);
+void octeon_mult_restore2_end(void);
 
 /**
  * Read a 32bit value from the Octeon NPI register space
index 69529624a0050713b120ecfd3b430a601dfb567b..193b4c6b7541a774f3a0ccfbaa8bb7f7add55b83 100644 (file)
@@ -121,6 +121,7 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 }
 #endif
 
+#ifdef CONFIG_PCI_DOMAINS
 #define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
 
 static inline int pci_proc_domain(struct pci_bus *bus)
@@ -128,6 +129,7 @@ static inline int pci_proc_domain(struct pci_bus *bus)
        struct pci_controller *hose = bus->sysdata;
        return hose->need_domain_info;
 }
+#endif /* CONFIG_PCI_DOMAINS */
 
 #endif /* __KERNEL__ */
 
index fc807aa5ec8d7593ef8bda4c13e184ae31d79a70..91747c282bb3fb3f1f6560cb5a722089aac9741e 100644 (file)
@@ -35,7 +35,7 @@
 #if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 
 /*
- * The following bits are directly used by the TLB hardware
+ * The following bits are implemented by the TLB hardware
  */
 #define _PAGE_GLOBAL_SHIFT     0
 #define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
 #define _PAGE_MODIFIED_SHIFT   (_PAGE_ACCESSED_SHIFT + 1)
 #define _PAGE_MODIFIED         (1 << _PAGE_MODIFIED_SHIFT)
 
-#define _PAGE_SILENT_READ      _PAGE_VALID
-#define _PAGE_SILENT_WRITE     _PAGE_DIRTY
-
 #define _PFN_SHIFT             (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
 
 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
 
 /*
- * The following are implemented by software
+ * The following bits are implemented in software
  */
-#define _PAGE_PRESENT_SHIFT    0
-#define _PAGE_PRESENT          (1 <<  _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT       1
-#define _PAGE_READ             (1 <<  _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT      2
-#define _PAGE_WRITE            (1 <<  _PAGE_WRITE_SHIFT)
-#define _PAGE_ACCESSED_SHIFT   3
-#define _PAGE_ACCESSED         (1 <<  _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT   4
-#define _PAGE_MODIFIED         (1 <<  _PAGE_MODIFIED_SHIFT)
+#define _PAGE_PRESENT_SHIFT    (0)
+#define _PAGE_PRESENT          (1 << _PAGE_PRESENT_SHIFT)
+#define _PAGE_READ_SHIFT       (_PAGE_PRESENT_SHIFT + 1)
+#define _PAGE_READ             (1 << _PAGE_READ_SHIFT)
+#define _PAGE_WRITE_SHIFT      (_PAGE_READ_SHIFT + 1)
+#define _PAGE_WRITE            (1 << _PAGE_WRITE_SHIFT)
+#define _PAGE_ACCESSED_SHIFT   (_PAGE_WRITE_SHIFT + 1)
+#define _PAGE_ACCESSED         (1 << _PAGE_ACCESSED_SHIFT)
+#define _PAGE_MODIFIED_SHIFT   (_PAGE_ACCESSED_SHIFT + 1)
+#define _PAGE_MODIFIED         (1 << _PAGE_MODIFIED_SHIFT)
 
 /*
- * And these are the hardware TLB bits
+ * The following bits are implemented by the TLB hardware
  */
-#define _PAGE_GLOBAL_SHIFT     8
-#define _PAGE_GLOBAL           (1 <<  _PAGE_GLOBAL_SHIFT)
-#define _PAGE_VALID_SHIFT      9
-#define _PAGE_VALID            (1 <<  _PAGE_VALID_SHIFT)
-#define _PAGE_SILENT_READ      (1 <<  _PAGE_VALID_SHIFT)       /* synonym  */
-#define _PAGE_DIRTY_SHIFT      10
+#define _PAGE_GLOBAL_SHIFT     (_PAGE_MODIFIED_SHIFT + 4)
+#define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
+#define _PAGE_VALID_SHIFT      (_PAGE_GLOBAL_SHIFT + 1)
+#define _PAGE_VALID            (1 << _PAGE_VALID_SHIFT)
+#define _PAGE_DIRTY_SHIFT      (_PAGE_VALID_SHIFT + 1)
 #define _PAGE_DIRTY            (1 << _PAGE_DIRTY_SHIFT)
-#define _PAGE_SILENT_WRITE     (1 << _PAGE_DIRTY_SHIFT)
-#define _CACHE_UNCACHED_SHIFT  11
+#define _CACHE_UNCACHED_SHIFT  (_PAGE_DIRTY_SHIFT + 1)
 #define _CACHE_UNCACHED                (1 << _CACHE_UNCACHED_SHIFT)
-#define _CACHE_MASK            (1 << _CACHE_UNCACHED_SHIFT)
+#define _CACHE_MASK            _CACHE_UNCACHED
 
-#else /* 'Normal' r4K case */
+#define _PFN_SHIFT             PAGE_SHIFT
+
+#else
 /*
  * When using the RI/XI bit support, we have 13 bits of flags below
  * the physical address. The RI/XI bits are placed such that a SRL 5
 
 /*
  * The following bits are implemented in software
- *
- * _PAGE_READ / _PAGE_READ_SHIFT should be unused if cpu_has_rixi.
  */
-#define _PAGE_PRESENT_SHIFT    (0)
+#define _PAGE_PRESENT_SHIFT    0
 #define _PAGE_PRESENT          (1 << _PAGE_PRESENT_SHIFT)
 #define _PAGE_READ_SHIFT       (cpu_has_rixi ? _PAGE_PRESENT_SHIFT : _PAGE_PRESENT_SHIFT + 1)
 #define _PAGE_READ ({BUG_ON(cpu_has_rixi); 1 << _PAGE_READ_SHIFT; })
 /* huge tlb page */
 #define _PAGE_HUGE_SHIFT       (_PAGE_MODIFIED_SHIFT + 1)
 #define _PAGE_HUGE             (1 << _PAGE_HUGE_SHIFT)
-#else
-#define _PAGE_HUGE_SHIFT       (_PAGE_MODIFIED_SHIFT)
-#define _PAGE_HUGE             ({BUG(); 1; })  /* Dummy value */
-#endif
-
-#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
-/* huge tlb page */
 #define _PAGE_SPLITTING_SHIFT  (_PAGE_HUGE_SHIFT + 1)
 #define _PAGE_SPLITTING                (1 << _PAGE_SPLITTING_SHIFT)
 #else
+#define _PAGE_HUGE_SHIFT       (_PAGE_MODIFIED_SHIFT)
+#define _PAGE_HUGE             ({BUG(); 1; })  /* Dummy value */
 #define _PAGE_SPLITTING_SHIFT  (_PAGE_HUGE_SHIFT)
 #define _PAGE_SPLITTING                ({BUG(); 1; })  /* Dummy value */
 #endif
 
 #define _PAGE_GLOBAL_SHIFT     (_PAGE_NO_READ_SHIFT + 1)
 #define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
-
 #define _PAGE_VALID_SHIFT      (_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID            (1 << _PAGE_VALID_SHIFT)
-/* synonym                */
-#define _PAGE_SILENT_READ      (_PAGE_VALID)
-
-/* The MIPS dirty bit     */
 #define _PAGE_DIRTY_SHIFT      (_PAGE_VALID_SHIFT + 1)
 #define _PAGE_DIRTY            (1 << _PAGE_DIRTY_SHIFT)
-#define _PAGE_SILENT_WRITE     (_PAGE_DIRTY)
-
 #define _CACHE_SHIFT           (_PAGE_DIRTY_SHIFT + 1)
 #define _CACHE_MASK            (7 << _CACHE_SHIFT)
 
 
 #endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */
 
-#ifndef _PFN_SHIFT
-#define _PFN_SHIFT                 PAGE_SHIFT
-#endif
+#define _PAGE_SILENT_READ      _PAGE_VALID
+#define _PAGE_SILENT_WRITE     _PAGE_DIRTY
+
 #define _PFN_MASK              (~((1 << (_PFN_SHIFT)) - 1))
 
 #ifndef _PAGE_NO_READ
 #ifndef _PAGE_NO_EXEC
 #define _PAGE_NO_EXEC ({BUG(); 0; })
 #endif
-#ifndef _PAGE_GLOBAL_SHIFT
-#define _PAGE_GLOBAL_SHIFT ilog2(_PAGE_GLOBAL)
-#endif
 
 
 #ifndef __ASSEMBLY__
@@ -266,8 +246,9 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
 #endif
 
 #define __READABLE     (_PAGE_SILENT_READ | _PAGE_ACCESSED | (cpu_has_rixi ? 0 : _PAGE_READ))
-#define __WRITEABLE    (_PAGE_WRITE | _PAGE_SILENT_WRITE | _PAGE_MODIFIED)
+#define __WRITEABLE    (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)
 
-#define _PAGE_CHG_MASK (_PFN_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | _CACHE_MASK)
+#define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED |      \
+                        _PFN_MASK | _CACHE_MASK)
 
 #endif /* _ASM_PGTABLE_BITS_H */
index 583ff42154794da86c5cc9b64ae9e81e4507e057..bef782c4a44bd33b20f24dda9422f401e2324acc 100644 (file)
@@ -99,29 +99,35 @@ extern void paging_init(void);
 
 #define htw_stop()                                                     \
 do {                                                                   \
-       if (cpu_has_htw)                                                \
-               write_c0_pwctl(read_c0_pwctl() &                        \
-                              ~(1 << MIPS_PWCTL_PWEN_SHIFT));          \
+       unsigned long flags;                                            \
+                                                                       \
+       if (cpu_has_htw) {                                              \
+               local_irq_save(flags);                                  \
+               if (!raw_current_cpu_data.htw_seq++) {                  \
+                       write_c0_pwctl(read_c0_pwctl() &                \
+                                      ~(1 << MIPS_PWCTL_PWEN_SHIFT));  \
+                       back_to_back_c0_hazard();                       \
+               }                                                       \
+               local_irq_restore(flags);                               \
+       }                                                               \
 } while(0)
 
 #define htw_start()                                                    \
 do {                                                                   \
-       if (cpu_has_htw)                                                \
-               write_c0_pwctl(read_c0_pwctl() |                        \
-                              (1 << MIPS_PWCTL_PWEN_SHIFT));           \
-} while(0)
-
-
-#define htw_reset()                                                    \
-do {                                                                   \
+       unsigned long flags;                                            \
+                                                                       \
        if (cpu_has_htw) {                                              \
-               htw_stop();                                             \
-               back_to_back_c0_hazard();                               \
-               htw_start();                                            \
-               back_to_back_c0_hazard();                               \
+               local_irq_save(flags);                                  \
+               if (!--raw_current_cpu_data.htw_seq) {                  \
+                       write_c0_pwctl(read_c0_pwctl() |                \
+                                      (1 << MIPS_PWCTL_PWEN_SHIFT));   \
+                       back_to_back_c0_hazard();                       \
+               }                                                       \
+               local_irq_restore(flags);                               \
        }                                                               \
 } while(0)
 
+
 extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
        pte_t pteval);
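
The point of the new htw_seq counter is that htw_stop()/htw_start() now nest: only the outermost pair actually touches PWCtl, so an inner stop/start (say, inside a PTE helper called from code that has already stopped the walker) cannot re-enable the hardware page-table walker early. An illustrative nesting, assuming the per-CPU counter starts at zero:

    /* Values show raw_current_cpu_data.htw_seq before -> after each call. */
    htw_stop();     /* 0 -> 1: walker disabled, hazard barrier executed */
    htw_stop();     /* 1 -> 2: already disabled, register left alone    */
    htw_start();    /* 2 -> 1: still disabled                           */
    htw_start();    /* 1 -> 0: walker re-enabled, hazard barrier        */
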
 
@@ -153,12 +159,13 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
 {
        pte_t null = __pte(0);
 
+       htw_stop();
        /* Preserve global status for the pair */
        if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
                null.pte_low = null.pte_high = _PAGE_GLOBAL;
 
        set_pte_at(mm, addr, ptep, null);
-       htw_reset();
+       htw_start();
 }
 #else
 
@@ -188,6 +195,7 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
 
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 {
+       htw_stop();
 #if !defined(CONFIG_CPU_R3000) && !defined(CONFIG_CPU_TX39XX)
        /* Preserve global status for the pair */
        if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
@@ -195,7 +203,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
        else
 #endif
                set_pte_at(mm, addr, ptep, __pte(0));
-       htw_reset();
+       htw_start();
 }
 #endif
 
@@ -334,7 +342,7 @@ static inline pte_t pte_mkyoung(pte_t pte)
        return pte;
 }
 
-#ifdef _PAGE_HUGE
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
 static inline int pte_huge(pte_t pte)  { return pte_val(pte) & _PAGE_HUGE; }
 
 static inline pte_t pte_mkhuge(pte_t pte)
@@ -342,7 +350,7 @@ static inline pte_t pte_mkhuge(pte_t pte)
        pte_val(pte) |= _PAGE_HUGE;
        return pte;
 }
-#endif /* _PAGE_HUGE */
+#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */
 #endif
 static inline int pte_special(pte_t pte)       { return 0; }
 static inline pte_t pte_mkspecial(pte_t pte)   { return pte; }
index f1df4cb4a286dc8f76f1186ca9b2b1c439ce1764..b5dcbee01fd7a52641584cbbf8b80848f7c6f4b9 100644 (file)
@@ -54,9 +54,7 @@ extern unsigned int vced_count, vcei_count;
 #define TASK_SIZE      0x7fff8000UL
 #endif
 
-#ifdef __KERNEL__
 #define STACK_TOP_MAX  TASK_SIZE
-#endif
 
 #define TASK_IS_32BIT_ADDR 1
 
@@ -73,11 +71,7 @@ extern unsigned int vced_count, vcei_count;
 #define TASK_SIZE32    0x7fff8000UL
 #define TASK_SIZE64    0x10000000000UL
 #define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
-
-#ifdef __KERNEL__
 #define STACK_TOP_MAX  TASK_SIZE64
-#endif
-
 
 #define TASK_SIZE_OF(tsk)                                              \
        (test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
@@ -211,6 +205,8 @@ struct octeon_cop2_state {
        unsigned long   cop2_gfm_poly;
        /* DMFC2 rt, 0x025A; DMFC2 rt, 0x025B - Pass2 */
        unsigned long   cop2_gfm_result[2];
+       /* DMFC2 rt, 0x24F, DMFC2 rt, 0x50, OCTEON III */
+       unsigned long   cop2_sha3[2];
 };
 #define COP2_INIT                                              \
        .cp2                    = {0,},
@@ -399,4 +395,15 @@ unsigned long get_wchan(struct task_struct *p);
 
 #endif
 
+/*
+ * Functions & macros implementing the PR_GET_FP_MODE & PR_SET_FP_MODE options
+ * to the prctl syscall.
+ */
+extern int mips_get_process_fp_mode(struct task_struct *task);
+extern int mips_set_process_fp_mode(struct task_struct *task,
+                                   unsigned int value);
+
+#define GET_FP_MODE(task)              mips_get_process_fp_mode(task)
+#define SET_FP_MODE(task,value)                mips_set_process_fp_mode(task, value)
+
 #endif /* _ASM_PROCESSOR_H */
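
The two hooks above wire the MIPS FPU mode into the PR_GET_FP_MODE/PR_SET_FP_MODE prctl options introduced alongside this work. A hedged userspace sketch; the option and flag names come from the uapi prctl header, and error handling is kept minimal:

    #include <sys/prctl.h>
    #include <linux/prctl.h>

    /* Illustrative only: read the current FP mode and request FR=1. */
    static int request_fr1(void)
    {
            int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

            if (mode < 0)
                    return mode;
            return prctl(PR_SET_FP_MODE, mode | PR_FP_MODE_FR, 0, 0, 0);
    }
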
index eaa26270a5e574bae3db56202a22434c4c4b930f..8ebc2aa5f3e1331fd0c84f2ba54675a4128a06a2 100644 (file)
@@ -24,13 +24,6 @@ struct boot_param_header;
 extern void __dt_setup_arch(void *bph);
 extern int __dt_register_buses(const char *bus0, const char *bus1);
 
-#define dt_setup_arch(sym)                                             \
-({                                                                     \
-       extern char __dtb_##sym##_begin[];                              \
-                                                                       \
-       __dt_setup_arch(__dtb_##sym##_begin);                           \
-})
-
 #else /* CONFIG_OF */
 static inline void device_tree_init(void) { }
 #endif /* CONFIG_OF */
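
With the dt_setup_arch() wrapper removed, platform code presumably hands the built-in blob to __dt_setup_arch() directly; a hedged sketch (the __dtb_start symbol and the use of plat_mem_setup() here are assumptions, not taken from this patch):

    /* Illustrative only: hand the built-in device tree to the DT core. */
    extern char __dtb_start[];

    void __init plat_mem_setup(void)
    {
            __dt_setup_arch(__dtb_start);
    }
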
index fc783f843bdc4272ccecfeddab877044a418703e..ffc320389f40a011ac6c66ef9c453fce23e38c34 100644 (file)
@@ -40,8 +40,8 @@ struct pt_regs {
        unsigned long cp0_cause;
        unsigned long cp0_epc;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
-       unsigned long long mpl[3];        /* MTM{0,1,2} */
-       unsigned long long mtp[3];        /* MTP{0,1,2} */
+       unsigned long long mpl[6];        /* MTM{0-5} */
+       unsigned long long mtp[6];        /* MTP{0-5} */
 #endif
 } __aligned(8);
 
index e293a8d89a6da590a3e46be5fccc60f1fc2c1983..1b22d2da88a1ec1b76e42dfde3cbff5cb5b422c2 100644 (file)
@@ -14,6 +14,7 @@
 
 #include <asm/asm.h>
 #include <asm/cacheops.h>
+#include <asm/compiler.h>
 #include <asm/cpu-features.h>
 #include <asm/cpu-type.h>
 #include <asm/mipsmtregs.h>
@@ -39,7 +40,7 @@ extern void (*r4k_blast_icache)(void);
        __asm__ __volatile__(                                           \
        "       .set    push                                    \n"     \
        "       .set    noreorder                               \n"     \
-       "       .set    arch=r4000                              \n"     \
+       "       .set "MIPS_ISA_ARCH_LEVEL"                      \n"     \
        "       cache   %0, %1                                  \n"     \
        "       .set    pop                                     \n"     \
        :                                                               \
@@ -147,7 +148,7 @@ static inline void flush_scache_line(unsigned long addr)
        __asm__ __volatile__(                                   \
        "       .set    push                    \n"             \
        "       .set    noreorder               \n"             \
-       "       .set    arch=r4000              \n"             \
+       "       .set "MIPS_ISA_ARCH_LEVEL"      \n"             \
        "1:     cache   %0, (%1)                \n"             \
        "2:     .set    pop                     \n"             \
        "       .section __ex_table,\"a\"       \n"             \
@@ -218,6 +219,7 @@ static inline void invalidate_tcache_page(unsigned long addr)
        cache_op(Page_Invalidate_T, addr);
 }
 
+#ifndef CONFIG_CPU_MIPSR6
 #define cache16_unroll32(base,op)                                      \
        __asm__ __volatile__(                                           \
        "       .set push                                       \n"     \
@@ -322,6 +324,150 @@ static inline void invalidate_tcache_page(unsigned long addr)
                : "r" (base),                                           \
                  "i" (op));
 
+#else
+/*
+ * MIPS R6 changed the cache opcode and moved to an 8-bit offset field.
+ * This means we now need to increment the base register before we flush
+ * more cache lines.
+ */
+#define cache16_unroll32(base,op)                              \
+       __asm__ __volatile__(                                   \
+       "       .set push\n"                                    \
+       "       .set noreorder\n"                               \
+       "       .set mips64r6\n"                                \
+       "       .set noat\n"                                    \
+       "       cache %1, 0x000(%0); cache %1, 0x010(%0)\n"     \
+       "       cache %1, 0x020(%0); cache %1, 0x030(%0)\n"     \
+       "       cache %1, 0x040(%0); cache %1, 0x050(%0)\n"     \
+       "       cache %1, 0x060(%0); cache %1, 0x070(%0)\n"     \
+       "       cache %1, 0x080(%0); cache %1, 0x090(%0)\n"     \
+       "       cache %1, 0x0a0(%0); cache %1, 0x0b0(%0)\n"     \
+       "       cache %1, 0x0c0(%0); cache %1, 0x0d0(%0)\n"     \
+       "       cache %1, 0x0e0(%0); cache %1, 0x0f0(%0)\n"     \
+       "       addiu $1, $0, 0x100                     \n"     \
+       "       cache %1, 0x000($1); cache %1, 0x010($1)\n"     \
+       "       cache %1, 0x020($1); cache %1, 0x030($1)\n"     \
+       "       cache %1, 0x040($1); cache %1, 0x050($1)\n"     \
+       "       cache %1, 0x060($1); cache %1, 0x070($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x090($1)\n"     \
+       "       cache %1, 0x0a0($1); cache %1, 0x0b0($1)\n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0d0($1)\n"     \
+       "       cache %1, 0x0e0($1); cache %1, 0x0f0($1)\n"     \
+       "       .set pop\n"                                     \
+               :                                               \
+               : "r" (base),                                   \
+                 "i" (op));
+
+#define cache32_unroll32(base,op)                              \
+       __asm__ __volatile__(                                   \
+       "       .set push\n"                                    \
+       "       .set noreorder\n"                               \
+       "       .set mips64r6\n"                                \
+       "       .set noat\n"                                    \
+       "       cache %1, 0x000(%0); cache %1, 0x020(%0)\n"     \
+       "       cache %1, 0x040(%0); cache %1, 0x060(%0)\n"     \
+       "       cache %1, 0x080(%0); cache %1, 0x0a0(%0)\n"     \
+       "       cache %1, 0x0c0(%0); cache %1, 0x0e0(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x020($1)\n"     \
+       "       cache %1, 0x040($1); cache %1, 0x060($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0a0($1)\n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"     \
+       "       addiu $1, $1, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x020($1)\n"     \
+       "       cache %1, 0x040($1); cache %1, 0x060($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0a0($1)\n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"     \
+       "       addiu $1, $1, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x020($1)\n"     \
+       "       cache %1, 0x040($1); cache %1, 0x060($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0a0($1)\n"     \
+       "       cache %1, 0x0c0($1); cache %1, 0x0e0($1)\n"     \
+       "       .set pop\n"                                     \
+               :                                               \
+               : "r" (base),                                   \
+                 "i" (op));
+
+#define cache64_unroll32(base,op)                              \
+       __asm__ __volatile__(                                   \
+       "       .set push\n"                                    \
+       "       .set noreorder\n"                               \
+       "       .set mips64r6\n"                                \
+       "       .set noat\n"                                    \
+       "       cache %1, 0x000(%0); cache %1, 0x040(%0)\n"     \
+       "       cache %1, 0x080(%0); cache %1, 0x0c0(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000($1); cache %1, 0x040($1)\n"     \
+       "       cache %1, 0x080($1); cache %1, 0x0c0($1)\n"     \
+       "       .set pop\n"                                     \
+               :                                               \
+               : "r" (base),                                   \
+                 "i" (op));
+
+#define cache128_unroll32(base,op)                             \
+       __asm__ __volatile__(                                   \
+       "       .set push\n"                                    \
+       "       .set noreorder\n"                               \
+       "       .set mips64r6\n"                                \
+       "       .set noat\n"                                    \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       cache %1, 0x000(%0); cache %1, 0x080(%0)\n"     \
+       "       addiu $1, %0, 0x100\n"                          \
+       "       .set pop\n"                                     \
+               :                                               \
+               : "r" (base),                                   \
+                 "i" (op));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 /*
  * Perform the cache operation specified by op using a user mode virtual
  * address while in kernel mode.
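The R6 variants above differ from the pre-R6 macros only in having to bump the base address into $1 (AT) in 0x100-byte steps, since the smaller offset field can no longer reach all 32 lines from a single base register. As a rough sketch of how such an unrolled macro is consumed (the helper name is hypothetical; the real helpers are generated by the __BUILD_BLAST_CACHE macros later in this header):

/*
 * Sketch only: flush one page of a 16-byte-line D-cache with the unrolled
 * macro defined above.  One invocation covers 32 lines x 16 bytes.
 */
static inline void blast_dcache16_page_sketch(unsigned long page)
{
	unsigned long addr = page;
	unsigned long end = page + PAGE_SIZE;

	do {
		cache16_unroll32(addr, Hit_Writeback_Inv_D);
		addr += 16 * 32;	/* 0x200 bytes per invocation */
	} while (addr < end);
}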
index 753275accd1892142151169ed8ca722e41742d14..195db5045ae57fa972096417a41ba76b4048a4e5 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef _ASM_SGIALIB_H
 #define _ASM_SGIALIB_H
 
+#include <linux/compiler.h>
 #include <asm/sgiarcs.h>
 
 extern struct linux_romvec *romvec;
@@ -70,8 +71,11 @@ extern LONG ArcRead(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
 extern LONG ArcWrite(ULONG fd, PVOID buf, ULONG num, PULONG cnt);
 
 /* Misc. routines. */
-extern VOID ArcReboot(VOID) __attribute__((noreturn));
-extern VOID ArcEnterInteractiveMode(VOID) __attribute__((noreturn));
+extern VOID ArcHalt(VOID) __noreturn;
+extern VOID ArcPowerDown(VOID) __noreturn;
+extern VOID ArcRestart(VOID) __noreturn;
+extern VOID ArcReboot(VOID) __noreturn;
+extern VOID ArcEnterInteractiveMode(VOID) __noreturn;
 extern VOID ArcFlushAllCaches(VOID);
 extern DISPLAY_STATUS *ArcGetDisplayStatus(ULONG FileID);
 
diff --git a/arch/mips/include/asm/siginfo.h b/arch/mips/include/asm/siginfo.h
deleted file mode 100644 (file)
index dd9a762..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998, 1999, 2001, 2003 Ralf Baechle
- * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
- */
-#ifndef _ASM_SIGINFO_H
-#define _ASM_SIGINFO_H
-
-#include <uapi/asm/siginfo.h>
-
-
-/*
- * Duplicated here because of <asm-generic/siginfo.h> braindamage ...
- */
-#include <linux/string.h>
-
-static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
-{
-       if (from->si_code < 0)
-               memcpy(to, from, sizeof(*to));
-       else
-               /* _sigchld is currently the largest know union member */
-               memcpy(to, from, 3*sizeof(int) + sizeof(from->_sifields._sigchld));
-}
-
-#endif /* _ASM_SIGINFO_H */
index c6d06d383ef90df1cf7bb8a4f69aaa641b40e7fd..b4548690ade9916e1d94e844641a505fad43bea4 100644 (file)
@@ -89,7 +89,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
                "        subu   %[ticket], %[ticket], 1                 \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
-               : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+               : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [serving_now_ptr] "+m" (lock->h.serving_now),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (my_ticket)
@@ -122,7 +122,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
                "        subu   %[ticket], %[ticket], 1                 \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
-               : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+               : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [serving_now_ptr] "+m" (lock->h.serving_now),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (my_ticket)
@@ -164,7 +164,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
                "        li     %[ticket], 0                            \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
-               : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+               : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (tmp2),
                  [now_serving] "=&r" (tmp3)
@@ -188,7 +188,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
                "        li     %[ticket], 0                            \n"
                "       .previous                                       \n"
                "       .set pop                                        \n"
-               : [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
+               : [ticket_ptr] "+" GCC_OFF_SMALL_ASM() (lock->lock),
                  [ticket] "=&r" (tmp),
                  [my_ticket] "=&r" (tmp2),
                  [now_serving] "=&r" (tmp3)
@@ -235,8 +235,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
                "       .set    reorder                                 \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
@@ -245,8 +245,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
                        "       bltz    %1, 1b                          \n"
                        "        addu   %1, 1                           \n"
                        "2:     sc      %1, %0                          \n"
-                       : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-                       : GCC_OFF12_ASM() (rw->lock)
+                       : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+                       : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
        }
@@ -254,9 +254,6 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
        smp_llsc_mb();
 }
 
-/* Note the use of sub, not subu which will make the kernel die with an
-   overflow exception if we ever try to unlock an rwlock that is already
-   unlocked or is being held by a writer.  */
 static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
        unsigned int tmp;
@@ -266,20 +263,20 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
                "1:     ll      %1, %2          # arch_read_unlock      \n"
-               "       sub     %1, 1                                   \n"
+               "       addiu   %1, 1                                   \n"
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
                        __asm__ __volatile__(
                        "1:     ll      %1, %2  # arch_read_unlock      \n"
-                       "       sub     %1, 1                           \n"
+                       "       addiu   %1, -1                          \n"
                        "       sc      %1, %0                          \n"
-                       : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-                       : GCC_OFF12_ASM() (rw->lock)
+                       : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+                       : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
        }
@@ -299,8 +296,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
                "       .set    reorder                                 \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
@@ -309,8 +306,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
                        "       bnez    %1, 1b                          \n"
                        "        lui    %1, 0x8000                      \n"
                        "2:     sc      %1, %0                          \n"
-                       : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
-                       : GCC_OFF12_ASM() (rw->lock)
+                       : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp)
+                       : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
        }
@@ -349,8 +346,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
                __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
                "2:                                                     \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                __asm__ __volatile__(
@@ -366,8 +363,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
                __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
                "2:                                                     \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        }
 
@@ -393,8 +390,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
                "       li      %2, 1                                   \n"
                "       .set    reorder                                 \n"
                "2:                                                     \n"
-               : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
-               : GCC_OFF12_ASM() (rw->lock)
+               : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+               : GCC_OFF_SMALL_ASM() (rw->lock)
                : "memory");
        } else {
                do {
@@ -406,9 +403,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
                        "       sc      %1, %0                          \n"
                        "       li      %2, 1                           \n"
                        "2:                                             \n"
-                       : "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp),
+                       : "=" GCC_OFF_SMALL_ASM() (rw->lock), "=&r" (tmp),
                          "=&r" (ret)
-                       : GCC_OFF12_ASM() (rw->lock)
+                       : GCC_OFF_SMALL_ASM() (rw->lock)
                        : "memory");
                } while (unlikely(!tmp));
 
index 0b89006e490788ffcc26a9225762c66f4bd4ea8b..0f90d88e464d3cc005ab3ae0411d6996c5d41e1a 100644 (file)
@@ -1,10 +1,10 @@
 #ifndef _MIPS_SPRAM_H
 #define _MIPS_SPRAM_H
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_MIPS_SPRAM)
 extern __init void spram_config(void);
 #else
 static inline void spram_config(void) { };
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_MIPS_SPRAM */
 
 #endif /* _MIPS_SPRAM_H */
index b188c797565ce48bac812aacd98922ef31c00180..28d6d9364bd1f2c431df08c72f58262e5297ec5c 100644 (file)
@@ -40,7 +40,7 @@
                LONG_S  v1, PT_HI(sp)
                mflhxu  v1
                LONG_S  v1, PT_ACX(sp)
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
                mfhi    v1
 #endif
 #ifdef CONFIG_32BIT
@@ -50,7 +50,7 @@
                LONG_S  $10, PT_R10(sp)
                LONG_S  $11, PT_R11(sp)
                LONG_S  $12, PT_R12(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
                LONG_S  v1, PT_HI(sp)
                mflo    v1
 #endif
@@ -58,7 +58,7 @@
                LONG_S  $14, PT_R14(sp)
                LONG_S  $15, PT_R15(sp)
                LONG_S  $24, PT_R24(sp)
-#ifndef CONFIG_CPU_HAS_SMARTMIPS
+#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
                LONG_S  v1, PT_LO(sp)
 #endif
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
                mtlhx   $24
                LONG_L  $24, PT_LO(sp)
                mtlhx   $24
-#else
+#elif !defined(CONFIG_CPU_MIPSR6)
                LONG_L  $24, PT_LO(sp)
                mtlo    $24
                LONG_L  $24, PT_HI(sp)
index b928b6f898cd5266efe89465d2dc87089f8f357c..e92d6c4b5ed192305b0b1f1605481f745cfadb10 100644 (file)
@@ -75,9 +75,12 @@ do {                                                                 \
 #endif
 
 #define __clear_software_ll_bit()                                      \
-do {                                                                   \
-       if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)       \
-               ll_bit = 0;                                             \
+do {   if (cpu_has_rw_llb) {                                           \
+               write_c0_lladdr(0);                                     \
+       } else {                                                        \
+               if (!__builtin_constant_p(cpu_has_llsc) || !cpu_has_llsc)\
+                       ll_bit = 0;                                     \
+       }                                                               \
 } while (0)
 
 #define switch_to(prev, next, last)                                    \
index 9e1295f874f0c143f3924aac7e606b93125ed9ce..55ed6602204cae5ae1219ee15f5a98b2c9f2813e 100644 (file)
@@ -28,7 +28,7 @@ struct thread_info {
        unsigned long           tp_value;       /* thread pointer */
        __u32                   cpu;            /* current CPU */
        int                     preempt_count;  /* 0 => preemptable, <0 => BUG */
-
+       int                     r2_emul_return; /* 1 => Returning from R2 emulator */
        mm_segment_t            addr_limit;     /*
                                                 * thread address space limit:
                                                 * 0x7fffffff for user-thead
index 89c22433b1c665ccf06c278f4bdf306642ffc828..fc0cf5ac0cf72ce28a38eec08a422719343ba4e4 100644 (file)
 enum major_op {
        spec_op, bcond_op, j_op, jal_op,
        beq_op, bne_op, blez_op, bgtz_op,
-       addi_op, addiu_op, slti_op, sltiu_op,
+       addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op,
        andi_op, ori_op, xori_op, lui_op,
        cop0_op, cop1_op, cop2_op, cop1x_op,
        beql_op, bnel_op, blezl_op, bgtzl_op,
-       daddi_op, daddiu_op, ldl_op, ldr_op,
+       daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op,
        spec2_op, jalx_op, mdmx_op, spec3_op,
        lb_op, lh_op, lwl_op, lw_op,
        lbu_op, lhu_op, lwr_op, lwu_op,
        sb_op, sh_op, swl_op, sw_op,
        sdl_op, sdr_op, swr_op, cache_op,
-       ll_op, lwc1_op, lwc2_op, pref_op,
-       lld_op, ldc1_op, ldc2_op, ld_op,
-       sc_op, swc1_op, swc2_op, major_3b_op,
-       scd_op, sdc1_op, sdc2_op, sd_op
+       ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op,
+       lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op,
+       sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op,
+       scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op
 };
 
 /*
@@ -83,9 +83,12 @@ enum spec3_op {
        swe_op    = 0x1f, bshfl_op  = 0x20,
        swle_op   = 0x21, swre_op   = 0x22,
        prefe_op  = 0x23, dbshfl_op = 0x24,
-       lbue_op   = 0x28, lhue_op   = 0x29,
-       lbe_op    = 0x2c, lhe_op    = 0x2d,
-       lle_op    = 0x2e, lwe_op    = 0x2f,
+       cache6_op = 0x25, sc6_op    = 0x26,
+       scd6_op   = 0x27, lbue_op   = 0x28,
+       lhue_op   = 0x29, lbe_op    = 0x2c,
+       lhe_op    = 0x2d, lle_op    = 0x2e,
+       lwe_op    = 0x2f, pref6_op  = 0x35,
+       ll6_op    = 0x36, lld6_op   = 0x37,
        rdhwr_op  = 0x3b
 };
 
@@ -112,7 +115,8 @@ enum cop_op {
        mfhc_op       = 0x03, mtc_op        = 0x04,
        dmtc_op       = 0x05, ctc_op        = 0x06,
        mthc0_op      = 0x06, mthc_op       = 0x07,
-       bc_op         = 0x08, cop_op        = 0x10,
+       bc_op         = 0x08, bc1eqz_op     = 0x09,
+       bc1nez_op     = 0x0d, cop_op        = 0x10,
        copm_op       = 0x18
 };
 
index d08f83f19db566899298e41ac06bf53f3a757400..2cb7fdead5702a5c8b5f0522e0c9f3e8e4ee96ba 100644 (file)
 
 #define HAVE_ARCH_SIGINFO_T
 
-/*
- * We duplicate the generic versions - <asm-generic/siginfo.h> is just borked
- * by design ...
- */
-#define HAVE_ARCH_COPY_SIGINFO
-struct siginfo;
-
 /*
  * Careful to keep union _sifields from shifting ...
  */
@@ -35,8 +28,9 @@ struct siginfo;
 
 #define __ARCH_SIGSYS
 
-#include <asm-generic/siginfo.h>
+#include <uapi/asm-generic/siginfo.h>
 
+/* We can't use generic siginfo_t, because our si_code and si_errno are swapped */
 typedef struct siginfo {
        int si_signo;
        int si_code;
@@ -124,5 +118,6 @@ typedef struct siginfo {
 #define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */
 #define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */
 
+#include <asm-generic/siginfo.h>
 
 #endif /* _UAPI_ASM_SIGINFO_H */
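The swap called out in the new comment is purely a layout issue at the head of the structure; a side-by-side sketch (illustrative structs only, not part of the patch):

/* Field order at the start of siginfo, generic ABI vs. the MIPS ABI. */
struct siginfo_generic_head { int si_signo; int si_errno; int si_code; };
struct siginfo_mips_head    { int si_signo; int si_code;  int si_errno; };

Because the order differs, the MIPS uapi header keeps its own struct siginfo definition and, as the hunk shows, includes <asm-generic/siginfo.h> only after that definition.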
index 92987d1bbe5fe26e957ee8b9a1b5737f6181204c..d3d2ff2d76dc8f2e3643e9255226975e8e4699a4 100644 (file)
@@ -52,7 +52,7 @@ obj-$(CONFIG_MIPS_MT_SMP)     += smp-mt.o
 obj-$(CONFIG_MIPS_CMP)         += smp-cmp.o
 obj-$(CONFIG_MIPS_CPS)         += smp-cps.o cps-vec.o
 obj-$(CONFIG_MIPS_GIC_IPI)     += smp-gic.o
-obj-$(CONFIG_CPU_MIPSR2)       += spram.o
+obj-$(CONFIG_MIPS_SPRAM)       += spram.o
 
 obj-$(CONFIG_MIPS_VPE_LOADER)  += vpe.o
 obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
@@ -90,6 +90,7 @@ obj-$(CONFIG_EARLY_PRINTK)    += early_printk.o
 obj-$(CONFIG_EARLY_PRINTK_8250)        += early_printk_8250.o
 obj-$(CONFIG_SPINLOCK_TEST)    += spinlock_test.o
 obj-$(CONFIG_MIPS_MACHINE)     += mips_machine.o
+obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR)    += mips-r2-to-r6-emul.o
 
 CFLAGS_cpu-bugs64.o    = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 
index 3b2dfdb4865fd9cbe208e41ca38e709db952c0e1..750d67ac41e9b19affe066d5be8d1f56f7363041 100644 (file)
@@ -97,6 +97,7 @@ void output_thread_info_defines(void)
        OFFSET(TI_TP_VALUE, thread_info, tp_value);
        OFFSET(TI_CPU, thread_info, cpu);
        OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+       OFFSET(TI_R2_EMUL_RET, thread_info, r2_emul_return);
        OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit);
        OFFSET(TI_REGS, thread_info, regs);
        DEFINE(_THREAD_SIZE, THREAD_SIZE);
@@ -381,6 +382,7 @@ void output_octeon_cop2_state_defines(void)
        OFFSET(OCTEON_CP2_GFM_RESULT,   octeon_cop2_state, cop2_gfm_result);
        OFFSET(OCTEON_CP2_HSH_DATW,     octeon_cop2_state, cop2_hsh_datw);
        OFFSET(OCTEON_CP2_HSH_IVW,      octeon_cop2_state, cop2_hsh_ivw);
+       OFFSET(OCTEON_CP2_SHA3,         octeon_cop2_state, cop2_sha3);
        OFFSET(THREAD_CP2,      task_struct, thread.cp2);
        OFFSET(THREAD_CVMSEG,   task_struct, thread.cvmseg.cvmseg);
        BLANK();
index 4d7d99d601cc13219e9d8f9631da6002b3d9df9d..c2e0f45ddf6cf48d05f7b97b5a095de0a04fca19 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
 #include <asm/inst.h>
+#include <asm/mips-r2-to-r6-emul.h>
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
 
@@ -399,11 +400,21 @@ int __MIPS16e_compute_return_epc(struct pt_regs *regs)
  * @returns:   -EFAULT on error and forces SIGBUS, and on success
  *             returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
  *             evaluating the branch.
+ *
+ * MIPS R6 Compact branches and forbidden slots:
+ *     Compact branches do not throw exceptions because they do
+ *     not have delay slots. The forbidden slot instruction ($PC+4)
+ *     is only executed if the branch was not taken. Otherwise the
+ *     forbidden slot is skipped entirely. This means that the
+ *     only way a MIPS R6 compact branch instruction can bring us
+ *     here is if its forbidden slot has raised an exception.
+ *     In that case the branch was not taken, so the EPC can be safely
+ *     set to EPC + 8.
  */
 int __compute_return_epc_for_insn(struct pt_regs *regs,
                                   union mips_instruction insn)
 {
-       unsigned int bit, fcr31, dspcontrol;
+       unsigned int bit, fcr31, dspcontrol, reg;
        long epc = regs->cp0_epc;
        int ret = 0;
 
@@ -417,6 +428,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                        regs->regs[insn.r_format.rd] = epc + 8;
                        /* Fall through */
                case jr_op:
+                       if (NO_R6EMU && insn.r_format.func == jr_op)
+                               goto sigill_r6;
                        regs->cp0_epc = regs->regs[insn.r_format.rs];
                        break;
                }
@@ -429,8 +442,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
         */
        case bcond_op:
                switch (insn.i_format.rt) {
-               case bltz_op:
                case bltzl_op:
+                       if (NO_R6EMU)
+                               goto sigill_r6;
+               case bltz_op:
                        if ((long)regs->regs[insn.i_format.rs] < 0) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                                if (insn.i_format.rt == bltzl_op)
@@ -440,8 +455,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                        regs->cp0_epc = epc;
                        break;
 
-               case bgez_op:
                case bgezl_op:
+                       if (NO_R6EMU)
+                               goto sigill_r6;
+               case bgez_op:
                        if ((long)regs->regs[insn.i_format.rs] >= 0) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                                if (insn.i_format.rt == bgezl_op)
@@ -453,7 +470,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
                case bltzal_op:
                case bltzall_op:
+                       if (NO_R6EMU && (insn.i_format.rs ||
+                           insn.i_format.rt == bltzall_op)) {
+                               ret = -SIGILL;
+                               break;
+                       }
                        regs->regs[31] = epc + 8;
+                       /*
+                        * OK we are here either because we hit a NAL
+                        * instruction or because we are emulating an
+                        * old bltzal{,l} one. Let's figure out what the
+                        * case really is.
+                        */
+                       if (!insn.i_format.rs) {
+                               /*
+                                * NAL or BLTZAL with rs == 0
+                                * Doesn't matter if we are R6 or not. The
+                                * result is the same
+                                */
+                               regs->cp0_epc += 4 +
+                                       (insn.i_format.simmediate << 2);
+                               break;
+                       }
+                       /* Now do the real thing for non-R6 BLTZAL{,L} */
                        if ((long)regs->regs[insn.i_format.rs] < 0) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                                if (insn.i_format.rt == bltzall_op)
@@ -465,7 +504,29 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
                case bgezal_op:
                case bgezall_op:
+                       if (NO_R6EMU && (insn.i_format.rs ||
+                           insn.i_format.rt == bgezall_op)) {
+                               ret = -SIGILL;
+                               break;
+                       }
                        regs->regs[31] = epc + 8;
+                       /*
+                        * OK we are here either because we hit a BAL
+                        * instruction or because we are emulating an
+                        * old bgezal{,l} one. Let's figure out what the
+                        * case really is.
+                        */
+                       if (!insn.i_format.rs) {
+                               /*
+                                * BAL or BGEZAL with rs == 0
+                                * Doesn't matter if we are R6 or not. The
+                                * result is the same
+                                */
+                               regs->cp0_epc += 4 +
+                                       (insn.i_format.simmediate << 2);
+                               break;
+                       }
+                       /* Now do the real thing for non-R6 BGEZAL{,L} */
                        if ((long)regs->regs[insn.i_format.rs] >= 0) {
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                                if (insn.i_format.rt == bgezall_op)
@@ -477,7 +538,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 
                case bposge32_op:
                        if (!cpu_has_dsp)
-                               goto sigill;
+                               goto sigill_dsp;
 
                        dspcontrol = rddsp(0x01);
 
@@ -508,8 +569,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
        /*
         * These are conditional and in i_format.
         */
-       case beq_op:
        case beql_op:
+               if (NO_R6EMU)
+                       goto sigill_r6;
+       case beq_op:
                if (regs->regs[insn.i_format.rs] ==
                    regs->regs[insn.i_format.rt]) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -520,8 +583,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                regs->cp0_epc = epc;
                break;
 
-       case bne_op:
        case bnel_op:
+               if (NO_R6EMU)
+                       goto sigill_r6;
+       case bne_op:
                if (regs->regs[insn.i_format.rs] !=
                    regs->regs[insn.i_format.rt]) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -532,8 +597,31 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                regs->cp0_epc = epc;
                break;
 
-       case blez_op: /* not really i_format */
-       case blezl_op:
+       case blezl_op: /* not really i_format */
+               if (NO_R6EMU)
+                       goto sigill_r6;
+       case blez_op:
+               /*
+                * Compact branches for R6 for the
+                * blez and blezl opcodes.
+                * BLEZ  | rs = 0 | rt != 0  == BLEZALC
+                * BLEZ  | rs = rt != 0      == BGEZALC
+                * BLEZ  | rs != 0 | rt != 0 == BGEUC
+                * BLEZL | rs = 0 | rt != 0  == BLEZC
+                * BLEZL | rs = rt != 0      == BGEZC
+                * BLEZL | rs != 0 | rt != 0 == BGEC
+                *
+                * For real BLEZ{,L}, rt is always 0.
+                */
+
+               if (cpu_has_mips_r6 && insn.i_format.rt) {
+                       if ((insn.i_format.opcode == blez_op) &&
+                           ((!insn.i_format.rs && insn.i_format.rt) ||
+                            (insn.i_format.rs == insn.i_format.rt)))
+                               regs->regs[31] = epc + 4;
+                       regs->cp0_epc += 8;
+                       break;
+               }
                /* rt field assumed to be zero */
                if ((long)regs->regs[insn.i_format.rs] <= 0) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -544,8 +632,32 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                regs->cp0_epc = epc;
                break;
 
-       case bgtz_op:
        case bgtzl_op:
+               if (NO_R6EMU)
+                       goto sigill_r6;
+       case bgtz_op:
+               /*
+                * Compact branches for R6 for the
+                * bgtz and bgtzl opcodes.
+                * BGTZ  | rs = 0 | rt != 0  == BGTZALC
+                * BGTZ  | rs = rt != 0      == BLTZALC
+                * BGTZ  | rs != 0 | rt != 0 == BLTUC
+                * BGTZL | rs = 0 | rt != 0  == BGTZC
+                * BGTZL | rs = rt != 0      == BLTZC
+                * BGTZL | rs != 0 | rt != 0 == BLTC
+                *
+                * The *ZALC variants apply for BGTZ when rt != 0.
+                * For real BGTZ{,L}, rt is always 0.
+                */
+               if (cpu_has_mips_r6 && insn.i_format.rt) {
+                       if ((insn.i_format.opcode == blez_op) &&
+                           ((!insn.i_format.rs && insn.i_format.rt) ||
+                           (insn.i_format.rs == insn.i_format.rt)))
+                               regs->regs[31] = epc + 4;
+                       regs->cp0_epc += 8;
+                       break;
+               }
+
                /* rt field assumed to be zero */
                if ((long)regs->regs[insn.i_format.rs] > 0) {
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
@@ -560,40 +672,83 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
         * And now the FPA/cp1 branch instructions.
         */
        case cop1_op:
-               preempt_disable();
-               if (is_fpu_owner())
-                       fcr31 = read_32bit_cp1_register(CP1_STATUS);
-               else
-                       fcr31 = current->thread.fpu.fcr31;
-               preempt_enable();
-
-               bit = (insn.i_format.rt >> 2);
-               bit += (bit != 0);
-               bit += 23;
-               switch (insn.i_format.rt & 3) {
-               case 0: /* bc1f */
-               case 2: /* bc1fl */
-                       if (~fcr31 & (1 << bit)) {
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                               if (insn.i_format.rt == 2)
-                                       ret = BRANCH_LIKELY_TAKEN;
-                       } else
+               if (cpu_has_mips_r6 &&
+                   ((insn.i_format.rs == bc1eqz_op) ||
+                    (insn.i_format.rs == bc1nez_op))) {
+                       if (!used_math()) { /* First time FPU user */
+                               ret = init_fpu();
+                               if (ret && NO_R6EMU) {
+                                       ret = -ret;
+                                       break;
+                               }
+                               ret = 0;
+                               set_used_math();
+                       }
+                       lose_fpu(1);    /* Save FPU state for the emulator. */
+                       reg = insn.i_format.rt;
+                       bit = 0;
+                       switch (insn.i_format.rs) {
+                       case bc1eqz_op:
+                               /* Test bit 0 */
+                               if (get_fpr32(&current->thread.fpu.fpr[reg], 0)
+                                   & 0x1)
+                                       bit = 1;
+                               break;
+                       case bc1nez_op:
+                               /* Test bit 0 */
+                               if (!(get_fpr32(&current->thread.fpu.fpr[reg], 0)
+                                     & 0x1))
+                                       bit = 1;
+                               break;
+                       }
+                       own_fpu(1);
+                       if (bit)
+                               epc = epc + 4 +
+                                       (insn.i_format.simmediate << 2);
+                       else
                                epc += 8;
                        regs->cp0_epc = epc;
+
                        break;
+               } else {
 
-               case 1: /* bc1t */
-               case 3: /* bc1tl */
-                       if (fcr31 & (1 << bit)) {
-                               epc = epc + 4 + (insn.i_format.simmediate << 2);
-                               if (insn.i_format.rt == 3)
-                                       ret = BRANCH_LIKELY_TAKEN;
-                       } else
-                               epc += 8;
-                       regs->cp0_epc = epc;
+                       preempt_disable();
+                       if (is_fpu_owner())
+                               fcr31 = read_32bit_cp1_register(CP1_STATUS);
+                       else
+                               fcr31 = current->thread.fpu.fcr31;
+                       preempt_enable();
+
+                       bit = (insn.i_format.rt >> 2);
+                       bit += (bit != 0);
+                       bit += 23;
+                       switch (insn.i_format.rt & 3) {
+                       case 0: /* bc1f */
+                       case 2: /* bc1fl */
+                               if (~fcr31 & (1 << bit)) {
+                                       epc = epc + 4 +
+                                               (insn.i_format.simmediate << 2);
+                                       if (insn.i_format.rt == 2)
+                                               ret = BRANCH_LIKELY_TAKEN;
+                               } else
+                                       epc += 8;
+                               regs->cp0_epc = epc;
+                               break;
+
+                       case 1: /* bc1t */
+                       case 3: /* bc1tl */
+                               if (fcr31 & (1 << bit)) {
+                                       epc = epc + 4 +
+                                               (insn.i_format.simmediate << 2);
+                                       if (insn.i_format.rt == 3)
+                                               ret = BRANCH_LIKELY_TAKEN;
+                               } else
+                                       epc += 8;
+                               regs->cp0_epc = epc;
+                               break;
+                       }
                        break;
                }
-               break;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
        case lwc2_op: /* This is bbit0 on Octeon */
                if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
@@ -626,15 +781,72 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
                        epc += 8;
                regs->cp0_epc = epc;
                break;
+#else
+       case bc6_op:
+               /* Only valid for MIPS R6 */
+               if (!cpu_has_mips_r6) {
+                       ret = -SIGILL;
+                       break;
+               }
+               regs->cp0_epc += 8;
+               break;
+       case balc6_op:
+               if (!cpu_has_mips_r6) {
+                       ret = -SIGILL;
+                       break;
+               }
+               /* Compact branch: BALC */
+               regs->regs[31] = epc + 4;
+               epc += 4 + (insn.i_format.simmediate << 2);
+               regs->cp0_epc = epc;
+               break;
+       case beqzcjic_op:
+               if (!cpu_has_mips_r6) {
+                       ret = -SIGILL;
+                       break;
+               }
+               /* Compact branch: BEQZC || JIC */
+               regs->cp0_epc += 8;
+               break;
+       case bnezcjialc_op:
+               if (!cpu_has_mips_r6) {
+                       ret = -SIGILL;
+                       break;
+               }
+               /* Compact branch: BNEZC || JIALC */
+               if (insn.i_format.rs)
+                       regs->regs[31] = epc + 4;
+               regs->cp0_epc += 8;
+               break;
 #endif
+       case cbcond0_op:
+       case cbcond1_op:
+               /* Only valid for MIPS R6 */
+               if (!cpu_has_mips_r6) {
+                       ret = -SIGILL;
+                       break;
+               }
+               /*
+                * Compact branches:
+                * bovc, beqc, beqzalc, bnvc, bnec, bnezalc
+                */
+               if (insn.i_format.rt && !insn.i_format.rs)
+                       regs->regs[31] = epc + 4;
+               regs->cp0_epc += 8;
+               break;
        }
 
        return ret;
 
-sigill:
+sigill_dsp:
        printk("%s: DSP branch but not DSP ASE - sending SIGBUS.\n", current->comm);
        force_sig(SIGBUS, current);
        return -EFAULT;
+sigill_r6:
+       pr_info("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n",
+               current->comm);
+       force_sig(SIGILL, current);
+       return -EFAULT;
 }
 EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
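The forbidden-slot rule documented in the hunk above reduces to one adjustment once the emulator knows a compact branch trapped there: the branch was not taken, so execution resumes two instructions later. A minimal sketch under that assumption (hypothetical helper, not part of the patch):

/*
 * Sketch: an R6 compact branch can only reach this code via an exception
 * in its forbidden slot, which implies the branch was not taken.  Resume
 * after branch (4 bytes) + forbidden slot (4 bytes).
 */
static inline void resume_after_forbidden_slot(struct pt_regs *regs)
{
	regs->cp0_epc += 8;
}

Link variants such as BLEZALC or JIALC also set $31 = epc + 4 before the skip, as the corresponding cases above do.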
 
index 6acaad0480af366830c77be996e9dba57fd8ad95..82bd2b278a243602dbf4c7ec15f8366af1ecf059 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/percpu.h>
 #include <linux/smp.h>
 #include <linux/irq.h>
-#include <linux/irqchip/mips-gic.h>
 
 #include <asm/time.h>
 #include <asm/cevt-r4k.h>
@@ -40,7 +39,7 @@ int cp0_timer_irq_installed;
 
 irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
-       const int r2 = cpu_has_mips_r2;
+       const int r2 = cpu_has_mips_r2_r6;
        struct clock_event_device *cd;
        int cpu = smp_processor_id();
 
@@ -85,10 +84,7 @@ void mips_event_handler(struct clock_event_device *dev)
  */
 static int c0_compare_int_pending(void)
 {
-#ifdef CONFIG_MIPS_GIC
-       if (gic_present)
-               return gic_get_timer_pending();
-#endif
+       /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
        return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
 }
 
index 0384b05ab5a02413cbcb11a163375029f285255f..55b759a0019e61671d78919bc5b143ba5ff4a94d 100644 (file)
@@ -99,11 +99,11 @@ not_nmi:
        xori    t2, t1, 0x7
        beqz    t2, 1f
         li     t3, 32
-       addi    t1, t1, 1
+       addiu   t1, t1, 1
        sllv    t1, t3, t1
 1:     /* At this point t1 == I-cache sets per way */
        _EXT    t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
-       addi    t2, t2, 1
+       addiu   t2, t2, 1
        mul     t1, t1, t0
        mul     t1, t1, t2
 
@@ -126,11 +126,11 @@ icache_done:
        xori    t2, t1, 0x7
        beqz    t2, 1f
         li     t3, 32
-       addi    t1, t1, 1
+       addiu   t1, t1, 1
        sllv    t1, t3, t1
 1:     /* At this point t1 == D-cache sets per way */
        _EXT    t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
-       addi    t2, t2, 1
+       addiu   t2, t2, 1
        mul     t1, t1, t0
        mul     t1, t1, t2
 
@@ -250,7 +250,7 @@ LEAF(mips_cps_core_init)
        mfc0    t0, CP0_MVPCONF0
        srl     t0, t0, MVPCONF0_PVPE_SHIFT
        andi    t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
-       addi    t7, t0, 1
+       addiu   t7, t0, 1
 
        /* If there's only 1, we're done */
        beqz    t0, 2f
@@ -280,7 +280,7 @@ LEAF(mips_cps_core_init)
        mttc0   t0, CP0_TCHALT
 
        /* Next VPE */
-       addi    t5, t5, 1
+       addiu   t5, t5, 1
        slt     t0, t5, t7
        bnez    t0, 1b
         nop
@@ -317,7 +317,7 @@ LEAF(mips_cps_boot_vpes)
        mfc0    t1, CP0_MVPCONF0
        srl     t1, t1, MVPCONF0_PVPE_SHIFT
        andi    t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
-       addi    t1, t1, 1
+       addiu   t1, t1, 1
 
        /* Calculate a mask for the VPE ID from EBase.CPUNum */
        clz     t1, t1
@@ -424,7 +424,7 @@ LEAF(mips_cps_boot_vpes)
 
        /* Next VPE */
 2:     srl     t6, t6, 1
-       addi    t5, t5, 1
+       addiu   t5, t5, 1
        bnez    t6, 1b
         nop
 
index 2d80b5f1aeae29361843640d1d0c4148cbf489fd..09f4034f239f511867a4d56792de909d0ad0c773 100644 (file)
@@ -244,7 +244,7 @@ static inline void check_daddi(void)
        panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
 }
 
-int daddiu_bug = -1;
+int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1;
 
 static inline void check_daddiu(void)
 {
@@ -314,11 +314,14 @@ static inline void check_daddiu(void)
 
 void __init check_bugs64_early(void)
 {
-       check_mult_sh();
-       check_daddiu();
+       if (!config_enabled(CONFIG_CPU_MIPSR6)) {
+               check_mult_sh();
+               check_daddiu();
+       }
 }
 
 void __init check_bugs64(void)
 {
-       check_daddi();
+       if (!config_enabled(CONFIG_CPU_MIPSR6))
+               check_daddi();
 }
index 5342674842f5826572b71ce10a76c7a2f635139f..48dfb9de853ddc92ebf4254a95e400b0db5e1789 100644 (file)
@@ -237,6 +237,13 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
                c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III;
                break;
 
+       /* R6 incompatible with everything else */
+       case MIPS_CPU_ISA_M64R6:
+               c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6;
+       case MIPS_CPU_ISA_M32R6:
+               c->isa_level |= MIPS_CPU_ISA_M32R6;
+               /* Break here so we don't add incompatible ISAs */
+               break;
        case MIPS_CPU_ISA_M32R2:
                c->isa_level |= MIPS_CPU_ISA_M32R2;
        case MIPS_CPU_ISA_M32R1:
@@ -326,6 +333,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
                case 1:
                        set_isa(c, MIPS_CPU_ISA_M32R2);
                        break;
+               case 2:
+                       set_isa(c, MIPS_CPU_ISA_M32R6);
+                       break;
                default:
                        goto unknown;
                }
@@ -338,6 +348,9 @@ static inline unsigned int decode_config0(struct cpuinfo_mips *c)
                case 1:
                        set_isa(c, MIPS_CPU_ISA_M64R2);
                        break;
+               case 2:
+                       set_isa(c, MIPS_CPU_ISA_M64R6);
+                       break;
                default:
                        goto unknown;
                }
@@ -424,8 +437,10 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
        if (config3 & MIPS_CONF3_MSA)
                c->ases |= MIPS_ASE_MSA;
        /* Only tested on 32-bit cores */
-       if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT))
+       if ((config3 & MIPS_CONF3_PW) && config_enabled(CONFIG_32BIT)) {
+               c->htw_seq = 0;
                c->options |= MIPS_CPU_HTW;
+       }
 
        return config3 & MIPS_CONF_M;
 }
@@ -499,6 +514,8 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
                c->options |= MIPS_CPU_EVA;
        if (config5 & MIPS_CONF5_MRP)
                c->options |= MIPS_CPU_MAAR;
+       if (config5 & MIPS_CONF5_LLB)
+               c->options |= MIPS_CPU_RW_LLB;
 
        return config5 & MIPS_CONF_M;
 }
@@ -533,7 +550,7 @@ static void decode_configs(struct cpuinfo_mips *c)
 
        if (cpu_has_rixi) {
                /* Enable the RIXI exceptions */
-               write_c0_pagegrain(read_c0_pagegrain() | PG_IEC);
+               set_c0_pagegrain(PG_IEC);
                back_to_back_c0_hazard();
                /* Verify the IEC bit is set */
                if (read_c0_pagegrain() & PG_IEC)
@@ -541,7 +558,7 @@ static void decode_configs(struct cpuinfo_mips *c)
        }
 
 #ifndef CONFIG_MIPS_CPS
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2_r6) {
                c->core = get_ebase_cpunum();
                if (cpu_has_mipsmt)
                        c->core >>= fls(core_nvpes()) - 1;
@@ -896,6 +913,11 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
 {
        c->writecombine = _CACHE_UNCACHED_ACCELERATED;
        switch (c->processor_id & PRID_IMP_MASK) {
+       case PRID_IMP_QEMU_GENERIC:
+               c->writecombine = _CACHE_UNCACHED;
+               c->cputype = CPU_QEMU_GENERIC;
+               __cpu_name[cpu] = "MIPS GENERIC QEMU";
+               break;
        case PRID_IMP_4KC:
                c->cputype = CPU_4KC;
                c->writecombine = _CACHE_UNCACHED;
@@ -1345,8 +1367,7 @@ void cpu_probe(void)
        if (c->options & MIPS_CPU_FPU) {
                c->fpu_id = cpu_get_fpu_id();
 
-               if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-                                   MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+               if (c->isa_level & cpu_has_mips_r) {
                        if (c->fpu_id & MIPS_FPIR_3D)
                                c->ases |= MIPS_ASE_MIPS3D;
                        if (c->fpu_id & MIPS_FPIR_FREP)
@@ -1354,7 +1375,7 @@ void cpu_probe(void)
                }
        }
 
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2_r6) {
                c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
                /* R2 has Performance Counter Interrupt indicator */
                c->options |= MIPS_CPU_PCI;
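In the decode_config0() hunks above, the new ISA levels are selected from the Config.AR (architecture revision) field. Written out as a table (a sketch for illustration; the AR=0 entry is an assumption carried over from the existing code rather than shown in the hunk):

/* Config.AR value -> 32-bit ISA level selected by decode_config0(). */
static const unsigned int ar_to_isa32_sketch[] = {
	[0] = MIPS_CPU_ISA_M32R1,	/* release 1 (assumed default)  */
	[1] = MIPS_CPU_ISA_M32R2,	/* release 2                    */
	[2] = MIPS_CPU_ISA_M32R6,	/* release 6 (new in this diff) */
};

The 64-bit path is identical with the M64R* constants, and set_isa() then ORs in all compatible lower levels, except for R6, which deliberately breaks out early because it is incompatible with R2 and below.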
index a5b5b56485c1618c34af3daea2b67e795a0f8c2b..d2c09f6475c5cb5454b34ed1c498d43fdc31b29e 100644 (file)
 #include <linux/elf.h>
 #include <linux/sched.h>
 
+/* FPU modes */
 enum {
-       FP_ERROR = -1,
-       FP_DOUBLE_64A = -2,
+       FP_FRE,
+       FP_FR0,
+       FP_FR1,
 };
 
+/**
+ * struct mode_req - ABI FPU mode requirements
+ * @single:    The program being loaded needs an FPU but it will only issue
+ *             single precision instructions meaning that it can execute in
+ *             either FR0 or FR1.
+ * @soft:      The soft(-float) requirement means that the program being
+ *             loaded has no FPU dependency at all (i.e. it has no
+ *             FPU instructions).
+ * @fr1:       The program being loaded depends on FPU being in FR=1 mode.
+ * @frdefault: The program being loaded depends on the default FPU mode.
+ *             That is FR0 for O32 and FR1 for N32/N64.
+ * @fre:       The program being loaded depends on FPU with FRE=1. This mode is
+ *             a bridge which uses FR=1 whilst still being able to maintain
+ *             full compatibility with pre-existing code using the O32 FP32
+ *             ABI.
+ *
+ * More information about the FP ABIs can be found here:
+ *
+ * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up
+ *
+ */
+
+struct mode_req {
+       bool single;
+       bool soft;
+       bool fr1;
+       bool frdefault;
+       bool fre;
+};
+
+static const struct mode_req fpu_reqs[] = {
+       [MIPS_ABI_FP_ANY]    = { true,  true,  true,  true,  true  },
+       [MIPS_ABI_FP_DOUBLE] = { false, false, false, true,  true  },
+       [MIPS_ABI_FP_SINGLE] = { true,  false, false, false, false },
+       [MIPS_ABI_FP_SOFT]   = { false, true,  false, false, false },
+       [MIPS_ABI_FP_OLD_64] = { false, false, false, false, false },
+       [MIPS_ABI_FP_XX]     = { false, false, true,  true,  true  },
+       [MIPS_ABI_FP_64]     = { false, false, true,  false, false },
+       [MIPS_ABI_FP_64A]    = { false, false, true,  false, true  }
+};
+
+/*
+ * Mode requirements when .MIPS.abiflags is not present in the ELF.
+ * Not present means that everything is acceptable except FR1.
+ */
+static struct mode_req none_req = { true, true, false, true, true };
+
 int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
                     bool is_interp, struct arch_elf_state *state)
 {
-       struct elf32_hdr *ehdr = _ehdr;
-       struct elf32_phdr *phdr = _phdr;
+       struct elf32_hdr *ehdr32 = _ehdr;
+       struct elf32_phdr *phdr32 = _phdr;
+       struct elf64_phdr *phdr64 = _phdr;
        struct mips_elf_abiflags_v0 abiflags;
        int ret;
 
-       if (config_enabled(CONFIG_64BIT) &&
-           (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
-               return 0;
-       if (phdr->p_type != PT_MIPS_ABIFLAGS)
-               return 0;
-       if (phdr->p_filesz < sizeof(abiflags))
-               return -EINVAL;
+       /* Let's see if this is an O32 ELF */
+       if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
+               /* FR = 1 for N32 */
+               if (ehdr32->e_flags & EF_MIPS_ABI2)
+                       state->overall_fp_mode = FP_FR1;
+               else
+                       /* Set a good default FPU mode for O32 */
+                       state->overall_fp_mode = cpu_has_mips_r6 ?
+                               FP_FRE : FP_FR0;
+
+               if (ehdr32->e_flags & EF_MIPS_FP64) {
+                       /*
+                        * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
+                        * later if needed
+                        */
+                       if (is_interp)
+                               state->interp_fp_abi = MIPS_ABI_FP_OLD_64;
+                       else
+                               state->fp_abi = MIPS_ABI_FP_OLD_64;
+               }
+               if (phdr32->p_type != PT_MIPS_ABIFLAGS)
+                       return 0;
+
+               if (phdr32->p_filesz < sizeof(abiflags))
+                       return -EINVAL;
+
+               ret = kernel_read(elf, phdr32->p_offset,
+                                 (char *)&abiflags,
+                                 sizeof(abiflags));
+       } else {
+               /* FR=1 is really the only option for 64-bit */
+               state->overall_fp_mode = FP_FR1;
+
+               if (phdr64->p_type != PT_MIPS_ABIFLAGS)
+                       return 0;
+               if (phdr64->p_filesz < sizeof(abiflags))
+                       return -EINVAL;
+
+               ret = kernel_read(elf, phdr64->p_offset,
+                                 (char *)&abiflags,
+                                 sizeof(abiflags));
+       }
 
-       ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags,
-                         sizeof(abiflags));
        if (ret < 0)
                return ret;
        if (ret != sizeof(abiflags))
@@ -48,35 +131,30 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
        return 0;
 }
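As a quick cross-check when debugging this path, the FP ABI that a binary records in .MIPS.abiflags can usually be inspected from user space with binutils, e.g. `readelf -A <binary>` typically prints an "FP ABI:" line on MIPS targets; binaries that lack the section take the MIPS_ABI_FP_UNKNOWN / none_req path above.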
 
-static inline unsigned get_fp_abi(struct elf32_hdr *ehdr, int in_abi)
+static inline unsigned get_fp_abi(int in_abi)
 {
        /* If the ABI requirement is provided, simply return that */
-       if (in_abi != -1)
+       if (in_abi != MIPS_ABI_FP_UNKNOWN)
                return in_abi;
 
-       /* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */
-       if (ehdr->e_flags & EF_MIPS_FP64)
-               return MIPS_ABI_FP_64;
-
-       /* Default to MIPS_ABI_FP_DOUBLE */
-       return MIPS_ABI_FP_DOUBLE;
+       /* Unknown ABI */
+       return MIPS_ABI_FP_UNKNOWN;
 }
 
 int arch_check_elf(void *_ehdr, bool has_interpreter,
                   struct arch_elf_state *state)
 {
        struct elf32_hdr *ehdr = _ehdr;
-       unsigned fp_abi, interp_fp_abi, abi0, abi1;
+       struct mode_req prog_req, interp_req;
+       int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
 
-       /* Ignore non-O32 binaries */
-       if (config_enabled(CONFIG_64BIT) &&
-           (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
+       if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
                return 0;
 
-       fp_abi = get_fp_abi(ehdr, state->fp_abi);
+       fp_abi = get_fp_abi(state->fp_abi);
 
        if (has_interpreter) {
-               interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi);
+               interp_fp_abi = get_fp_abi(state->interp_fp_abi);
 
                abi0 = min(fp_abi, interp_fp_abi);
                abi1 = max(fp_abi, interp_fp_abi);
@@ -84,108 +162,103 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
                abi0 = abi1 = fp_abi;
        }
 
-       state->overall_abi = FP_ERROR;
-
-       if (abi0 == abi1) {
-               state->overall_abi = abi0;
-       } else if (abi0 == MIPS_ABI_FP_ANY) {
-               state->overall_abi = abi1;
-       } else if (abi0 == MIPS_ABI_FP_DOUBLE) {
-               switch (abi1) {
-               case MIPS_ABI_FP_XX:
-                       state->overall_abi = MIPS_ABI_FP_DOUBLE;
-                       break;
-
-               case MIPS_ABI_FP_64A:
-                       state->overall_abi = FP_DOUBLE_64A;
-                       break;
-               }
-       } else if (abi0 == MIPS_ABI_FP_SINGLE ||
-                  abi0 == MIPS_ABI_FP_SOFT) {
-               /* Cannot link with other ABIs */
-       } else if (abi0 == MIPS_ABI_FP_OLD_64) {
-               switch (abi1) {
-               case MIPS_ABI_FP_XX:
-               case MIPS_ABI_FP_64:
-               case MIPS_ABI_FP_64A:
-                       state->overall_abi = MIPS_ABI_FP_64;
-                       break;
-               }
-       } else if (abi0 == MIPS_ABI_FP_XX ||
-                  abi0 == MIPS_ABI_FP_64 ||
-                  abi0 == MIPS_ABI_FP_64A) {
-               state->overall_abi = MIPS_ABI_FP_64;
-       }
+       /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */
+       max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
+                  (!(ehdr->e_flags & EF_MIPS_ABI2))) ?
+               MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT;
 
-       switch (state->overall_abi) {
-       case MIPS_ABI_FP_64:
-       case MIPS_ABI_FP_64A:
-       case FP_DOUBLE_64A:
-               if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
-                       return -ELIBBAD;
-               break;
+       if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
+           (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
+               return -ELIBBAD;
+
+       /* It's time to determine the FPU mode requirements */
+       prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0];
+       interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1];
 
-       case FP_ERROR:
+       /*
+        * Check whether the program's and interp's ABIs have a matching FPU
+        * mode requirement.
+        */
+       prog_req.single = interp_req.single && prog_req.single;
+       prog_req.soft = interp_req.soft && prog_req.soft;
+       prog_req.fr1 = interp_req.fr1 && prog_req.fr1;
+       prog_req.frdefault = interp_req.frdefault && prog_req.frdefault;
+       prog_req.fre = interp_req.fre && prog_req.fre;
+
+       /*
+        * Determine the desired FPU mode
+        *
+        * Decision making:
+        *
+        * - We want FP_FRE if FRE=1 and both FR=1 and FR=0 are false. This
+        *   means that we have a combination of program and interpreter
+        *   that inherently requires the hybrid FP mode.
+        * - If FR1 and FRDEFAULT are both true, we hit the any-ABI or
+        *   fpxx case. In any-ABI (or no-ABI) there are no FPU
+        *   instructions, so we don't care about the mode and simply use
+        *   the one preferred by the hardware. In the fpxx case, that ABI can
+        *   handle both FR=1 and FR=0, so, again, we simply choose the one
+        *   preferred by the hardware. Similarly, if the code only uses
+        *   single-precision FPU instructions and the default ABI FPU mode is
+        *   not acceptable (i.e. a single + any-ABI combination, or single
+        *   being true while frdefault is false), we again set the FPU mode
+        *   to the one preferred by the hardware.
+        * - We want FP_FR1 if that's the only matching mode and the default
+        *   one is not acceptable.
+        * - Return -ELIBBAD if we can't find a matching FPU mode.
+        */
+       if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1)
+               state->overall_fp_mode = FP_FRE;
+       else if ((prog_req.fr1 && prog_req.frdefault) ||
+                (prog_req.single && !prog_req.frdefault))
+               /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
+               state->overall_fp_mode = ((current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+                                         cpu_has_mips_r2_r6) ?
+                                         FP_FR1 : FP_FR0;
+       else if (prog_req.fr1)
+               state->overall_fp_mode = FP_FR1;
+       else  if (!prog_req.fre && !prog_req.frdefault &&
+                 !prog_req.fr1 && !prog_req.single && !prog_req.soft)
                return -ELIBBAD;
-       }
 
        return 0;
 }
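Worked through, the decision rules in the comment above reduce to a small amount of boolean logic on the combined requirements. Below is a standalone sketch of that selection step; it is not kernel code, `hw_prefers_fr1` is a hypothetical flag standing in for the FPIR/ISA check on current_cpu_data, and `dflt` stands in for the default mode already chosen while parsing the ELF headers:

	#include <stdbool.h>

	struct mode_req { bool single, soft, fr1, frdefault, fre; };

	enum fp_mode { MODE_FRE, MODE_FR0, MODE_FR1, MODE_INCOMPATIBLE };

	static enum fp_mode pick_mode(struct mode_req r, bool hw_prefers_fr1,
				      enum fp_mode dflt)
	{
		if (r.fre && !r.frdefault && !r.fr1)
			return MODE_FRE;		/* hybrid mode is required */
		if ((r.fr1 && r.frdefault) || (r.single && !r.frdefault))
			return hw_prefers_fr1 ? MODE_FR1 : MODE_FR0;
		if (r.fr1)
			return MODE_FR1;
		if (!r.fre && !r.frdefault && !r.single && !r.soft)
			return MODE_INCOMPATIBLE;	/* -ELIBBAD in the kernel */
		return dflt;	/* keep the default picked from the headers */
	}

	int main(void)
	{
		/* FP_64 object with an FP_XX interpreter: only fr1 survives */
		struct mode_req overall = { false, false, true, false, false };

		return pick_mode(overall, true, MODE_FR0) == MODE_FR1 ? 0 : 1;
	}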
 
-void mips_set_personality_fp(struct arch_elf_state *state)
+static inline void set_thread_fp_mode(int hybrid, int regs32)
 {
-       if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) {
-               /*
-                * Use hybrid FPRs for all code which can correctly execute
-                * with that mode.
-                */
-               switch (state->overall_abi) {
-               case MIPS_ABI_FP_DOUBLE:
-               case MIPS_ABI_FP_SINGLE:
-               case MIPS_ABI_FP_SOFT:
-               case MIPS_ABI_FP_XX:
-               case MIPS_ABI_FP_ANY:
-                       /* FR=1, FRE=1 */
-                       clear_thread_flag(TIF_32BIT_FPREGS);
-                       set_thread_flag(TIF_HYBRID_FPREGS);
-                       return;
-               }
-       }
-
-       switch (state->overall_abi) {
-       case MIPS_ABI_FP_DOUBLE:
-       case MIPS_ABI_FP_SINGLE:
-       case MIPS_ABI_FP_SOFT:
-               /* FR=0 */
-               set_thread_flag(TIF_32BIT_FPREGS);
+       if (hybrid)
+               set_thread_flag(TIF_HYBRID_FPREGS);
+       else
                clear_thread_flag(TIF_HYBRID_FPREGS);
-               break;
-
-       case FP_DOUBLE_64A:
-               /* FR=1, FRE=1 */
+       if (regs32)
+               set_thread_flag(TIF_32BIT_FPREGS);
+       else
                clear_thread_flag(TIF_32BIT_FPREGS);
-               set_thread_flag(TIF_HYBRID_FPREGS);
-               break;
+}
 
-       case MIPS_ABI_FP_64:
-       case MIPS_ABI_FP_64A:
-               /* FR=1, FRE=0 */
-               clear_thread_flag(TIF_32BIT_FPREGS);
-               clear_thread_flag(TIF_HYBRID_FPREGS);
-               break;
+void mips_set_personality_fp(struct arch_elf_state *state)
+{
+       /*
+        * This function is only ever called for O32 ELFs so we should
+        * not be worried about N32/N64 binaries.
+        */
 
-       case MIPS_ABI_FP_XX:
-       case MIPS_ABI_FP_ANY:
-               if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
-                       set_thread_flag(TIF_32BIT_FPREGS);
-               else
-                       clear_thread_flag(TIF_32BIT_FPREGS);
+       if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+               return;
 
-               clear_thread_flag(TIF_HYBRID_FPREGS);
+       switch (state->overall_fp_mode) {
+       case FP_FRE:
+               set_thread_fp_mode(1, 0);
+               break;
+       case FP_FR0:
+               set_thread_fp_mode(0, 1);
+               break;
+       case FP_FR1:
+               set_thread_fp_mode(0, 0);
                break;
-
        default:
-       case FP_ERROR:
                BUG();
        }
 }
index 4353d323f0175cc2fcbefac58762b90ad7da0c59..af41ba6db9601d16540b945b0112e130bd9eb845 100644 (file)
@@ -46,6 +46,11 @@ resume_userspace:
        local_irq_disable               # make sure we dont miss an
                                        # interrupt setting need_resched
                                        # between sampling and return
+#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR
+       lw      k0, TI_R2_EMUL_RET($28)
+       bnez    k0, restore_all_from_r2_emul
+#endif
+
        LONG_L  a2, TI_FLAGS($28)       # current->work
        andi    t0, a2, _TIF_WORK_MASK  # (ignoring syscall_trace)
        bnez    t0, work_pending
@@ -114,6 +119,19 @@ restore_partial:           # restore partial frame
        RESTORE_SP_AND_RET
        .set    at
 
+#ifdef CONFIG_MIPSR2_TO_R6_EMULATOR
+restore_all_from_r2_emul:                      # restore full frame
+       .set    noat
+       sw      zero, TI_R2_EMUL_RET($28)       # reset it
+       RESTORE_TEMP
+       RESTORE_AT
+       RESTORE_STATIC
+       RESTORE_SOME
+       LONG_L  sp, PT_R29(sp)
+       eretnc
+       .set    at
+#endif
+
 work_pending:
        andi    t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
        beqz    t0, work_notifysig
@@ -158,7 +176,8 @@ syscall_exit_work:
        jal     syscall_trace_leave
        b       resume_userspace
 
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT)
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) || \
+    defined(CONFIG_MIPS_MT)
 
 /*
  * MIPS32R2 Instruction Hazard Barrier - must be called
@@ -171,4 +190,4 @@ LEAF(mips_ihb)
        nop
        END(mips_ihb)
 
-#endif /* CONFIG_CPU_MIPSR2 or CONFIG_MIPS_MT */
+#endif /* CONFIG_CPU_MIPSR2 or CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
index a5e26dd9059256ed7c2a2033210bcd1bbc99b6e3..2ebaabe3af1513269e100d8bcffa9e8e9cb1f2c8 100644 (file)
@@ -125,7 +125,7 @@ LEAF(__r4k_wait)
        nop
        nop
 #endif
-       .set    arch=r4000
+       .set    MIPS_ISA_ARCH_LEVEL_RAW
        wait
        /* end of rollback region (the region size must be power of two) */
 1:
index 0b9082b6b6832d104a7994c0a6d44f13f61912cb..368c88b7eb6c985a848601415c9e8baf7aa2a5bb 100644 (file)
@@ -186,6 +186,7 @@ void __init check_wait(void)
        case CPU_PROAPTIV:
        case CPU_P5600:
        case CPU_M5150:
+       case CPU_QEMU_GENERIC:
                cpu_wait = r4k_wait;
                if (read_c0_config7() & MIPS_CONF7_WII)
                        cpu_wait = r4k_wait_irqoff;
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
new file mode 100644 (file)
index 0000000..64d17e4
--- /dev/null
@@ -0,0 +1,2378 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ *      MIPS R2 user space instruction emulator for MIPS R6
+ *
+ */
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/ptrace.h>
+#include <linux/seq_file.h>
+
+#include <asm/asm.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/inst.h>
+#include <asm/mips-r2-to-r6-emul.h>
+#include <asm/local.h>
+#include <asm/ptrace.h>
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_64BIT
+#define ADDIU  "daddiu "
+#define INS    "dins "
+#define EXT    "dext "
+#else
+#define ADDIU  "addiu "
+#define INS    "ins "
+#define EXT    "ext "
+#endif /* CONFIG_64BIT */
+
+#define SB     "sb "
+#define LB     "lb "
+#define LL     "ll "
+#define SC     "sc "
+
+DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
+DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
+DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
+
+extern const unsigned int fpucondbit[8];
+
+#define MIPS_R2_EMUL_TOTAL_PASS        10
+
+int mipsr2_emulation = 0;
+
+static int __init mipsr2emu_enable(char *s)
+{
+       mipsr2_emulation = 1;
+
+       pr_info("MIPS R2-to-R6 Emulator Enabled!");
+
+       return 1;
+}
+__setup("mipsr2emu", mipsr2emu_enable);
+
+/**
+ * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in the delay slot
+ * for performance, instead of the traditional (and rather slow) approach of
+ * using a stack trampoline.
+ * @regs: Process register set
+ * @ir: Instruction
+ */
+static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
+{
+       switch (MIPSInst_OPCODE(ir)) {
+       case addiu_op:
+               if (MIPSInst_RT(ir))
+                       regs->regs[MIPSInst_RT(ir)] =
+                               (s32)regs->regs[MIPSInst_RS(ir)] +
+                               (s32)MIPSInst_SIMM(ir);
+               return 0;
+       case daddiu_op:
+               if (config_enabled(CONFIG_32BIT))
+                       break;
+
+               if (MIPSInst_RT(ir))
+                       regs->regs[MIPSInst_RT(ir)] =
+                               (s64)regs->regs[MIPSInst_RS(ir)] +
+                               (s64)MIPSInst_SIMM(ir);
+               return 0;
+       case lwc1_op:
+       case swc1_op:
+       case cop1_op:
+       case cop1x_op:
+               /* FPU instructions in delay slot */
+               return -SIGFPE;
+       case spec_op:
+               switch (MIPSInst_FUNC(ir)) {
+               case or_op:
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       regs->regs[MIPSInst_RS(ir)] |
+                                       regs->regs[MIPSInst_RT(ir)];
+                       return 0;
+               case sll_op:
+                       if (MIPSInst_RS(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
+                                               MIPSInst_FD(ir));
+                       return 0;
+               case srl_op:
+                       if (MIPSInst_RS(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
+                                               MIPSInst_FD(ir));
+                       return 0;
+               case addu_op:
+                       if (MIPSInst_FD(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s32)((u32)regs->regs[MIPSInst_RS(ir)] +
+                                             (u32)regs->regs[MIPSInst_RT(ir)]);
+                       return 0;
+               case subu_op:
+                       if (MIPSInst_FD(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s32)((u32)regs->regs[MIPSInst_RS(ir)] -
+                                             (u32)regs->regs[MIPSInst_RT(ir)]);
+                       return 0;
+               case dsll_op:
+                       if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
+                                               MIPSInst_FD(ir));
+                       return 0;
+               case dsrl_op:
+                       if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
+                                               MIPSInst_FD(ir));
+                       return 0;
+               case daddu_op:
+                       if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (u64)regs->regs[MIPSInst_RS(ir)] +
+                                       (u64)regs->regs[MIPSInst_RT(ir)];
+                       return 0;
+               case dsubu_op:
+                       if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+                               break;
+
+                       if (MIPSInst_RD(ir))
+                               regs->regs[MIPSInst_RD(ir)] =
+                                       (s64)((u64)regs->regs[MIPSInst_RS(ir)] -
+                                             (u64)regs->regs[MIPSInst_RT(ir)]);
+                       return 0;
+               }
+               break;
+       default:
+               pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
+                        ir, MIPSInst_OPCODE(ir));
+       }
+
+       return SIGILL;
+}
+
+/**
+ * movf_func - Emulate a MOVF instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movf_func(struct pt_regs *regs, u32 ir)
+{
+       u32 csr;
+       u32 cond;
+
+       csr = current->thread.fpu.fcr31;
+       cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+       if (((csr & cond) == 0) && MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+       MIPS_R2_STATS(movs);
+       return 0;
+}
+
+/**
+ * movt_func - Emulate a MOVT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movt_func(struct pt_regs *regs, u32 ir)
+{
+       u32 csr;
+       u32 cond;
+
+       csr = current->thread.fpu.fcr31;
+       cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+
+       if (((csr & cond) != 0) && MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+
+       MIPS_R2_STATS(movs);
+
+       return 0;
+}
+
+/**
+ * jr_func - Emulate a JR instruction.
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns SIGILL if the JR was in a delay slot, SIGEMT if we
+ * can't compute the EPC, SIGSEGV if we can't access the
+ * userland instruction, or 0 on success.
+ */
+static int jr_func(struct pt_regs *regs, u32 ir)
+{
+       int err;
+       unsigned long cepc, epc, nepc;
+       u32 nir;
+
+       if (delay_slot(regs))
+               return SIGILL;
+
+       /* EPC after the RI/JR instruction */
+       nepc = regs->cp0_epc;
+       /* Roll back to the reserved R2 JR instruction */
+       regs->cp0_epc -= 4;
+       epc = regs->cp0_epc;
+       err = __compute_return_epc(regs);
+
+       if (err < 0)
+               return SIGEMT;
+
+
+       /* Computed EPC */
+       cepc = regs->cp0_epc;
+
+       /* Get DS instruction */
+       err = __get_user(nir, (u32 __user *)nepc);
+       if (err)
+               return SIGSEGV;
+
+       MIPS_R2BR_STATS(jrs);
+
+       /* If nir == 0 (NOP), there is nothing else to do */
+       if (nir) {
+               /*
+                * Negative err means FPU instruction in BD-slot,
+                * Zero err means 'BD-slot emulation done'
+                * For anything else we go back to trampoline emulation.
+                */
+               err = mipsr6_emul(regs, nir);
+               if (err > 0) {
+                       regs->cp0_epc = nepc;
+                       err = mips_dsemul(regs, nir, cepc);
+                       if (err == SIGILL)
+                               err = SIGEMT;
+                       MIPS_R2_STATS(dsemul);
+               }
+       }
+
+       return err;
+}
+
+/**
+ * movz_func - Emulate a MOVZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movz_func(struct pt_regs *regs, u32 ir)
+{
+       if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+       MIPS_R2_STATS(movs);
+
+       return 0;
+}
+
+/**
+ * movn_func - Emulate a MOVN instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movn_func(struct pt_regs *regs, u32 ir)
+{
+       if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+       MIPS_R2_STATS(movs);
+
+       return 0;
+}
+
+/**
+ * mfhi_func - Emulate a MFHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mfhi_func(struct pt_regs *regs, u32 ir)
+{
+       if (MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->hi;
+
+       MIPS_R2_STATS(hilo);
+
+       return 0;
+}
+
+/**
+ * mthi_func - Emulate a MTHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mthi_func(struct pt_regs *regs, u32 ir)
+{
+       regs->hi = regs->regs[MIPSInst_RS(ir)];
+
+       MIPS_R2_STATS(hilo);
+
+       return 0;
+}
+
+/**
+ * mflo_func - Emulate a MFLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mflo_func(struct pt_regs *regs, u32 ir)
+{
+       if (MIPSInst_RD(ir))
+               regs->regs[MIPSInst_RD(ir)] = regs->lo;
+
+       MIPS_R2_STATS(hilo);
+
+       return 0;
+}
+
+/**
+ * mtlo_func - Emulate a MTLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mtlo_func(struct pt_regs *regs, u32 ir)
+{
+       regs->lo = regs->regs[MIPSInst_RS(ir)];
+
+       MIPS_R2_STATS(hilo);
+
+       return 0;
+}
+
+/**
+ * mult_func - Emulate a MULT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mult_func(struct pt_regs *regs, u32 ir)
+{
+       s64 res;
+       s32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (s64)rt * (s64)rs;
+
+       rs = res;
+       regs->lo = (s64)rs;
+       rt = res >> 32;
+       res = (s64)rt;
+       regs->hi = res;
+
+       MIPS_R2_STATS(muls);
+
+       return 0;
+}
+
+/**
+ * multu_func - Emulate a MULTU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int multu_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (u64)rt * (u64)rs;
+       rt = res;
+       regs->lo = (s64)rt;
+       regs->hi = (s64)(res >> 32);
+
+       MIPS_R2_STATS(muls);
+
+       return 0;
+}
+
+/**
+ * div_func - Emulate a DIV instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int div_func(struct pt_regs *regs, u32 ir)
+{
+       s32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+
+       regs->lo = (s64)(rs / rt);
+       regs->hi = (s64)(rs % rt);
+
+       MIPS_R2_STATS(divs);
+
+       return 0;
+}
+
+/**
+ * divu_func - Emulate a DIVU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int divu_func(struct pt_regs *regs, u32 ir)
+{
+       u32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+
+       regs->lo = (s64)(rs / rt);
+       regs->hi = (s64)(rs % rt);
+
+       MIPS_R2_STATS(divs);
+
+       return 0;
+}
+
+/**
+ * dmult_func - Emulate a DMULT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dmult_func(struct pt_regs *regs, u32 ir)
+{
+       s64 res;
+       s64 rt, rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = rt * rs;
+
+       regs->lo = res;
+       __asm__ __volatile__(
+               "dmuh %0, %1, %2\t\n"
+               : "=r"(res)
+               : "r"(rt), "r"(rs));
+
+       regs->hi = res;
+
+       MIPS_R2_STATS(muls);
+
+       return 0;
+}
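DMULT/DMULTU no longer exist on R6, so the emulation above computes LO with a plain C multiply and obtains the high 64 bits of the product via the R6 dmuh/dmuhu instruction. A host-side sketch of the same 128-bit split, using the GCC/Clang __int128 extension instead of dmuh (illustrative only, arbitrary operand values, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		int64_t rs = -3, rt = 0x4000000000000001LL;
		__int128 prod = (__int128)rs * rt;

		uint64_t lo = (uint64_t)prod;		/* what the kernel stores in LO */
		int64_t  hi = (int64_t)(prod >> 64);	/* what dmuh would yield for HI */

		printf("hi=%lld lo=%llu\n", (long long)hi, (unsigned long long)lo);
		return 0;
	}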
+
+/**
+ * dmultu_func - Emulate a DMULTU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dmultu_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u64 rt, rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = rt * rs;
+
+       regs->lo = res;
+       __asm__ __volatile__(
+               "dmuhu %0, %1, %2\t\n"
+               : "=r"(res)
+               : "r"(rt), "r"(rs));
+
+       regs->hi = res;
+
+       MIPS_R2_STATS(muls);
+
+       return 0;
+}
+
+/**
+ * ddiv_func - Emulate a DDIV instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int ddiv_func(struct pt_regs *regs, u32 ir)
+{
+       s64 rt, rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+
+       regs->lo = rs / rt;
+       regs->hi = rs % rt;
+
+       MIPS_R2_STATS(divs);
+
+       return 0;
+}
+
+/**
+ * ddivu_func - Emulate a DDIVU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int ddivu_func(struct pt_regs *regs, u32 ir)
+{
+       u64 rt, rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+
+       regs->lo = rs / rt;
+       regs->hi = rs % rt;
+
+       MIPS_R2_STATS(divs);
+
+       return 0;
+}
+
+/* Instructions under the SPECIAL opcode that were removed in R6 */
+static struct r2_decoder_table spec_op_table[] = {
+       { 0xfc1ff83f, 0x00000008, jr_func },
+       { 0xfc00ffff, 0x00000018, mult_func },
+       { 0xfc00ffff, 0x00000019, multu_func },
+       { 0xfc00ffff, 0x0000001c, dmult_func },
+       { 0xfc00ffff, 0x0000001d, dmultu_func },
+       { 0xffff07ff, 0x00000010, mfhi_func },
+       { 0xfc1fffff, 0x00000011, mthi_func },
+       { 0xffff07ff, 0x00000012, mflo_func },
+       { 0xfc1fffff, 0x00000013, mtlo_func },
+       { 0xfc0307ff, 0x00000001, movf_func },
+       { 0xfc0307ff, 0x00010001, movt_func },
+       { 0xfc0007ff, 0x0000000a, movz_func },
+       { 0xfc0007ff, 0x0000000b, movn_func },
+       { 0xfc00ffff, 0x0000001a, div_func },
+       { 0xfc00ffff, 0x0000001b, divu_func },
+       { 0xfc00ffff, 0x0000001e, ddiv_func },
+       { 0xfc00ffff, 0x0000001f, ddivu_func },
+       {}
+};
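Each row in these tables is effectively a { mask, match, handler } triple: an instruction word is dispatched to a handler when (inst & mask) == match. A small user-space sketch of that matching loop follows; the field names and the sample encoding are illustrative only (the row values are copied from spec_op_table above):

	#include <stdint.h>
	#include <stdio.h>

	struct row { uint32_t mask, match; const char *name; };

	int main(void)
	{
		static const struct row rows[] = {
			{ 0xfc1ff83f, 0x00000008, "jr"   },
			{ 0xfc00ffff, 0x00000018, "mult" },
			{ 0, 0, NULL }
		};
		uint32_t inst = 0x00850018;	/* mult $a0, $a1 */
		const struct row *p;

		for (p = rows; p->name; p++)
			if ((inst & p->mask) == p->match)
				printf("matched %s\n", p->name);	/* prints "matched mult" */
		return 0;
	}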
+
+/**
+ * madd_func - Emulate a MADD instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int madd_func(struct pt_regs *regs, u32 ir)
+{
+       s64 res;
+       s32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (s64)rt * (s64)rs;
+       rt = regs->hi;
+       rs = regs->lo;
+       res += ((((s64)rt) << 32) | (u32)rs);
+
+       rt = res;
+       regs->lo = (s64)rt;
+       rs = res >> 32;
+       regs->hi = (s64)rs;
+
+       MIPS_R2_STATS(dsps);
+
+       return 0;
+}
+
+/**
+ * maddu_func - Emulate a MADDU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int maddu_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (u64)rt * (u64)rs;
+       rt = regs->hi;
+       rs = regs->lo;
+       res += ((((s64)rt) << 32) | (u32)rs);
+
+       rt = res;
+       regs->lo = (s64)rt;
+       rs = res >> 32;
+       regs->hi = (s64)rs;
+
+       MIPS_R2_STATS(dsps);
+
+       return 0;
+}
+
+/**
+ * msub_func - Emulate a MSUB instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msub_func(struct pt_regs *regs, u32 ir)
+{
+       s64 res;
+       s32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (s64)rt * (s64)rs;
+       rt = regs->hi;
+       rs = regs->lo;
+       res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+       rt = res;
+       regs->lo = (s64)rt;
+       rs = res >> 32;
+       regs->hi = (s64)rs;
+
+       MIPS_R2_STATS(dsps);
+
+       return 0;
+}
+
+/**
+ * msubu_func - Emulate a MSUBU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msubu_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u32 rt, rs;
+
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (u64)rt * (u64)rs;
+       rt = regs->hi;
+       rs = regs->lo;
+       res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+       rt = res;
+       regs->lo = (s64)rt;
+       rs = res >> 32;
+       regs->hi = (s64)rs;
+
+       MIPS_R2_STATS(dsps);
+
+       return 0;
+}
+
+/**
+ * mul_func - Emulate a MUL instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mul_func(struct pt_regs *regs, u32 ir)
+{
+       s64 res;
+       s32 rt, rs;
+
+       if (!MIPSInst_RD(ir))
+               return 0;
+       rt = regs->regs[MIPSInst_RT(ir)];
+       rs = regs->regs[MIPSInst_RS(ir)];
+       res = (s64)rt * (s64)rs;
+
+       rs = res;
+       regs->regs[MIPSInst_RD(ir)] = (s64)rs;
+
+       MIPS_R2_STATS(muls);
+
+       return 0;
+}
+
+/**
+ * clz_func - Emulate a CLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int clz_func(struct pt_regs *regs, u32 ir)
+{
+       u32 res;
+       u32 rs;
+
+       if (!MIPSInst_RD(ir))
+               return 0;
+
+       rs = regs->regs[MIPSInst_RS(ir)];
+       __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
+       regs->regs[MIPSInst_RD(ir)] = res;
+
+       MIPS_R2_STATS(bops);
+
+       return 0;
+}
+
+/**
+ * clo_func - Emulate a CLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+
+static int clo_func(struct pt_regs *regs, u32 ir)
+{
+       u32 res;
+       u32 rs;
+
+       if (!MIPSInst_RD(ir))
+               return 0;
+
+       rs = regs->regs[MIPSInst_RS(ir)];
+       __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
+       regs->regs[MIPSInst_RD(ir)] = res;
+
+       MIPS_R2_STATS(bops);
+
+       return 0;
+}
+
+/**
+ * dclz_func - Emulate a DCLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int dclz_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u64 rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       if (!MIPSInst_RD(ir))
+               return 0;
+
+       rs = regs->regs[MIPSInst_RS(ir)];
+       __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
+       regs->regs[MIPSInst_RD(ir)] = res;
+
+       MIPS_R2_STATS(bops);
+
+       return 0;
+}
+
+/**
+ * dclo_func - Emulate a DCLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int dclo_func(struct pt_regs *regs, u32 ir)
+{
+       u64 res;
+       u64 rs;
+
+       if (config_enabled(CONFIG_32BIT))
+               return SIGILL;
+
+       if (!MIPSInst_RD(ir))
+               return 0;
+
+       rs = regs->regs[MIPSInst_RS(ir)];
+       __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
+       regs->regs[MIPSInst_RD(ir)] = res;
+
+       MIPS_R2_STATS(bops);
+
+       return 0;
+}
+
+/* Instructions under the SPECIAL2 opcode that were removed in R6 */
+static struct r2_decoder_table spec2_op_table[] = {
+       { 0xfc00ffff, 0x70000000, madd_func },
+       { 0xfc00ffff, 0x70000001, maddu_func },
+       { 0xfc0007ff, 0x70000002, mul_func },
+       { 0xfc00ffff, 0x70000004, msub_func },
+       { 0xfc00ffff, 0x70000005, msubu_func },
+       { 0xfc0007ff, 0x70000020, clz_func },
+       { 0xfc0007ff, 0x70000021, clo_func },
+       { 0xfc0007ff, 0x70000024, dclz_func },
+       { 0xfc0007ff, 0x70000025, dclo_func },
+       { }
+};
+
+static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
+                                     struct r2_decoder_table *table)
+{
+       struct r2_decoder_table *p;
+       int err;
+
+       for (p = table; p->func; p++) {
+               if ((inst & p->mask) == p->code) {
+                       err = (p->func)(regs, inst);
+                       return err;
+               }
+       }
+       return SIGILL;
+}
+
+/**
+ * mipsr2_decoder - Decode and emulate a MIPS R2 instruction
+ * @regs: Process register set
+ * @inst: Instruction to decode and emulate
+ */
+int mipsr2_decoder(struct pt_regs *regs, u32 inst)
+{
+       int err = 0;
+       unsigned long vaddr;
+       u32 nir;
+       unsigned long cpc, epc, nepc, r31, res, rs, rt;
+
+       void __user *fault_addr = NULL;
+       int pass = 0;
+
+repeat:
+       r31 = regs->regs[31];
+       epc = regs->cp0_epc;
+       err = compute_return_epc(regs);
+       if (err < 0) {
+               BUG();
+               return SIGEMT;
+       }
+       pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d))\n",
+                inst, epc, pass);
+
+       switch (MIPSInst_OPCODE(inst)) {
+       case spec_op:
+               err = mipsr2_find_op_func(regs, inst, spec_op_table);
+               if (err < 0) {
+                       /* FPU instruction under JR */
+                       regs->cp0_cause |= CAUSEF_BD;
+                       goto fpu_emul;
+               }
+               break;
+       case spec2_op:
+               err = mipsr2_find_op_func(regs, inst, spec2_op_table);
+               break;
+       case bcond_op:
+               rt = MIPSInst_RT(inst);
+               rs = MIPSInst_RS(inst);
+               switch (rt) {
+               case tgei_op:
+                       if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
+                               do_trap_or_bp(regs, 0, "TGEI");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case tgeiu_op:
+                       if (regs->regs[rs] >= MIPSInst_UIMM(inst))
+                               do_trap_or_bp(regs, 0, "TGEIU");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case tlti_op:
+                       if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
+                               do_trap_or_bp(regs, 0, "TLTI");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case tltiu_op:
+                       if (regs->regs[rs] < MIPSInst_UIMM(inst))
+                               do_trap_or_bp(regs, 0, "TLTIU");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case teqi_op:
+                       if (regs->regs[rs] == MIPSInst_SIMM(inst))
+                               do_trap_or_bp(regs, 0, "TEQI");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case tnei_op:
+                       if (regs->regs[rs] != MIPSInst_SIMM(inst))
+                               do_trap_or_bp(regs, 0, "TNEI");
+
+                       MIPS_R2_STATS(traps);
+
+                       break;
+               case bltzl_op:
+               case bgezl_op:
+               case bltzall_op:
+               case bgezall_op:
+                       if (delay_slot(regs)) {
+                               err = SIGILL;
+                               break;
+                       }
+                       regs->regs[31] = r31;
+                       regs->cp0_epc = epc;
+                       err = __compute_return_epc(regs);
+                       if (err < 0)
+                               return SIGEMT;
+                       if (err != BRANCH_LIKELY_TAKEN)
+                               break;
+                       cpc = regs->cp0_epc;
+                       nepc = epc + 4;
+                       err = __get_user(nir, (u32 __user *)nepc);
+                       if (err) {
+                               err = SIGSEGV;
+                               break;
+                       }
+                       /*
+                        * This will probably be optimized away when
+                        * CONFIG_DEBUG_FS is not enabled
+                        */
+                       switch (rt) {
+                       case bltzl_op:
+                               MIPS_R2BR_STATS(bltzl);
+                               break;
+                       case bgezl_op:
+                               MIPS_R2BR_STATS(bgezl);
+                               break;
+                       case bltzall_op:
+                               MIPS_R2BR_STATS(bltzall);
+                               break;
+                       case bgezall_op:
+                               MIPS_R2BR_STATS(bgezall);
+                               break;
+                       }
+
+                       switch (MIPSInst_OPCODE(nir)) {
+                       case cop1_op:
+                       case cop1x_op:
+                       case lwc1_op:
+                       case swc1_op:
+                               regs->cp0_cause |= CAUSEF_BD;
+                               goto fpu_emul;
+                       }
+                       if (nir) {
+                               err = mipsr6_emul(regs, nir);
+                               if (err > 0) {
+                                       err = mips_dsemul(regs, nir, cpc);
+                                       if (err == SIGILL)
+                                               err = SIGEMT;
+                                       MIPS_R2_STATS(dsemul);
+                               }
+                       }
+                       break;
+               case bltzal_op:
+               case bgezal_op:
+                       if (delay_slot(regs)) {
+                               err = SIGILL;
+                               break;
+                       }
+                       regs->regs[31] = r31;
+                       regs->cp0_epc = epc;
+                       err = __compute_return_epc(regs);
+                       if (err < 0)
+                               return SIGEMT;
+                       cpc = regs->cp0_epc;
+                       nepc = epc + 4;
+                       err = __get_user(nir, (u32 __user *)nepc);
+                       if (err) {
+                               err = SIGSEGV;
+                               break;
+                       }
+                       /*
+                        * This will probably be optimized away when
+                        * CONFIG_DEBUG_FS is not enabled
+                        */
+                       switch (rt) {
+                       case bltzal_op:
+                               MIPS_R2BR_STATS(bltzal);
+                               break;
+                       case bgezal_op:
+                               MIPS_R2BR_STATS(bgezal);
+                               break;
+                       }
+
+                       switch (MIPSInst_OPCODE(nir)) {
+                       case cop1_op:
+                       case cop1x_op:
+                       case lwc1_op:
+                       case swc1_op:
+                               regs->cp0_cause |= CAUSEF_BD;
+                               goto fpu_emul;
+                       }
+                       if (nir) {
+                               err = mipsr6_emul(regs, nir);
+                               if (err > 0) {
+                                       err = mips_dsemul(regs, nir, cpc);
+                                       if (err == SIGILL)
+                                               err = SIGEMT;
+                                       MIPS_R2_STATS(dsemul);
+                               }
+                       }
+                       break;
+               default:
+                       regs->regs[31] = r31;
+                       regs->cp0_epc = epc;
+                       err = SIGILL;
+                       break;
+               }
+               break;
+
+       case beql_op:
+       case bnel_op:
+       case blezl_op:
+       case bgtzl_op:
+               if (delay_slot(regs)) {
+                       err = SIGILL;
+                       break;
+               }
+               regs->regs[31] = r31;
+               regs->cp0_epc = epc;
+               err = __compute_return_epc(regs);
+               if (err < 0)
+                       return SIGEMT;
+               if (err != BRANCH_LIKELY_TAKEN)
+                       break;
+               cpc = regs->cp0_epc;
+               nepc = epc + 4;
+               err = __get_user(nir, (u32 __user *)nepc);
+               if (err) {
+                       err = SIGSEGV;
+                       break;
+               }
+               /*
+                * This will probably be optimized away when
+                * CONFIG_DEBUG_FS is not enabled
+                */
+               switch (MIPSInst_OPCODE(inst)) {
+               case beql_op:
+                       MIPS_R2BR_STATS(beql);
+                       break;
+               case bnel_op:
+                       MIPS_R2BR_STATS(bnel);
+                       break;
+               case blezl_op:
+                       MIPS_R2BR_STATS(blezl);
+                       break;
+               case bgtzl_op:
+                       MIPS_R2BR_STATS(bgtzl);
+                       break;
+               }
+
+               switch (MIPSInst_OPCODE(nir)) {
+               case cop1_op:
+               case cop1x_op:
+               case lwc1_op:
+               case swc1_op:
+                       regs->cp0_cause |= CAUSEF_BD;
+                       goto fpu_emul;
+               }
+               if (nir) {
+                       err = mipsr6_emul(regs, nir);
+                       if (err > 0) {
+                               err = mips_dsemul(regs, nir, cpc);
+                               if (err == SIGILL)
+                                       err = SIGEMT;
+                               MIPS_R2_STATS(dsemul);
+                       }
+               }
+               break;
+       case lwc1_op:
+       case swc1_op:
+       case cop1_op:
+       case cop1x_op:
+fpu_emul:
+               regs->regs[31] = r31;
+               regs->cp0_epc = epc;
+               if (!used_math()) {     /* First time FPU user.  */
+                       err = init_fpu();
+                       set_used_math();
+               }
+               lose_fpu(1);    /* Save FPU state for the emulator. */
+
+               err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
+                                              &fault_addr);
+
+               /*
+                * This is a tricky issue - lose_fpu() uses LL/SC atomics
+                * if the FPU is owned, which effectively cancels a user-level
+                * LL/SC sequence. So it might seem logical not to restore FPU
+                * ownership here. But sequences of multiple FPU instructions
+                * are much more common than LL-FPU-SC, so we prefer to loop
+                * here until the next scheduler cycle cancels FPU ownership.
+                */
+               own_fpu(1);     /* Restore FPU state. */
+
+               if (err)
+                       current->thread.cp0_baduaddr = (unsigned long)fault_addr;
+
+               MIPS_R2_STATS(fpus);
+
+               break;
+
+       case lwl_op:
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_READ, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "1:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 24, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "2:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 16, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "3:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 8, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "4:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 0, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "1:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 24, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "2:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 16, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "3:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 8, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "4:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 0, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:     sll     %0, %0, 0\n"
+                       "10:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       10b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV));
+
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = rt;
+
+               MIPS_R2_STATS(loads);
+
+               break;
+
+       case lwr_op:
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_READ, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "1:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 0, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "2:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 8, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "3:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 16, 8\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                       "4:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 24, 8\n"
+                       "       sll     %0, %0, 0\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "1:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 0, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "2:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 8, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "3:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 16, 8\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                       "4:"    LB      "%1, 0(%2)\n"
+                               INS     "%0, %1, 24, 8\n"
+                       "       sll     %0, %0, 0\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "10:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       10b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV));
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = rt;
+
+               MIPS_R2_STATS(loads);
+
+               break;
+
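LWL and LWR were also removed in R6. Each merges part of an unaligned word into the destination register: on little-endian, LWL fills the high-order bytes from the addressed byte down to the word boundary, and LWR fills the low-order bytes from the addressed byte up to it, which is exactly what the byte-by-byte asm above reproduces. A little-endian user-space model of the two operations follows (illustrative only, not kernel code; the helper names are hypothetical and no address checking is done):

	#include <stdint.h>
	#include <stdio.h>

	static uint32_t lwl_le(const uint8_t *mem, uint32_t addr, uint32_t rt)
	{
		unsigned int shift = 24;
		uint32_t a = addr;

		for (;;) {
			rt = (rt & ~(0xffu << shift)) | ((uint32_t)mem[a] << shift);
			if ((a & 3) == 0)	/* stop at the word boundary */
				break;
			a--;
			shift -= 8;
		}
		return rt;
	}

	static uint32_t lwr_le(const uint8_t *mem, uint32_t addr, uint32_t rt)
	{
		unsigned int shift = 0;
		uint32_t a = addr;

		for (;;) {
			rt = (rt & ~(0xffu << shift)) | ((uint32_t)mem[a] << shift);
			if ((a & 3) == 3)	/* stop at the end of the word */
				break;
			a++;
			shift += 8;
		}
		return rt;
	}

	int main(void)
	{
		const uint8_t mem[8] = { 0x11, 0x22, 0x33, 0x44,
					 0x55, 0x66, 0x77, 0x88 };
		uint32_t rt = 0;

		/* classic unaligned load of the word at offset 1:
		 * lwr rt, 1(mem); lwl rt, 4(mem) */
		rt = lwr_le(mem, 1, rt);
		rt = lwl_le(mem, 4, rt);
		printf("0x%08x\n", rt);		/* prints 0x55443322 */
		return 0;
	}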
+       case swl_op:
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                               EXT     "%1, %0, 24, 8\n"
+                       "1:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 16, 8\n"
+                       "2:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 8, 8\n"
+                       "3:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 0, 8\n"
+                       "4:"    SB      "%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                               EXT     "%1, %0, 24, 8\n"
+                       "1:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 16, 8\n"
+                       "2:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 8, 8\n"
+                       "3:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 0, 8\n"
+                       "4:"    SB      "%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV)
+                       : "memory");
+
+               MIPS_R2_STATS(stores);
+
+               break;
+
+       case swr_op:
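+               /* SWR: store rt's least-significant bytes one at a time, up to the word boundary. */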
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                               EXT     "%1, %0, 0, 8\n"
+                       "1:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 8, 8\n"
+                       "2:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 16, 8\n"
+                       "3:"    SB      "%1, 0(%2)\n"
+                               ADDIU   "%2, %2, 1\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               EXT     "%1, %0, 24, 8\n"
+                       "4:"    SB      "%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                               EXT     "%1, %0, 0, 8\n"
+                       "1:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 8, 8\n"
+                       "2:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 16, 8\n"
+                       "3:"    SB      "%1, 0(%2)\n"
+                       "       andi    %1, %2, 0x3\n"
+                       "       beq     $0, %1, 9f\n"
+                               ADDIU   "%2, %2, -1\n"
+                               EXT     "%1, %0, 24, 8\n"
+                       "4:"    SB      "%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV)
+                       : "memory");
+
+               MIPS_R2_STATS(stores);
+
+               break;
+
+       case ldl_op:
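+               /* LDL (64-bit only): fill rt's most-significant bytes from the unaligned address, byte by byte. */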
+               if (config_enabled(CONFIG_32BIT)) {
+                   err = SIGILL;
+                   break;
+               }
+
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_READ, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "1:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 56, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "2:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 48, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "3:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 40, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "4:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 32, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "5:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 24, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "6:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 16, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "7:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 8, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "0:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 0, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "1:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 56, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "2:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 48, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "3:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 40, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "4:     lb      %1, 0(%2)\n"
+                       "       dinsu   %0, %1, 32, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "5:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 24, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "6:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 16, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "7:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 8, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "0:     lb      %1, 0(%2)\n"
+                       "       dins    %0, %1, 0, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .word   5b,8b\n"
+                       "       .word   6b,8b\n"
+                       "       .word   7b,8b\n"
+                       "       .word   0b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV));
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = rt;
+
+               MIPS_R2_STATS(loads);
+               break;
+
+       case ldr_op:
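+               /* LDR (64-bit only): fill rt's least-significant bytes from the unaligned address, byte by byte. */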
+               if (config_enabled(CONFIG_32BIT)) {
+                   err = SIGILL;
+                   break;
+               }
+
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_READ, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "1:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 0, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "2:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 8, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "3:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 16, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "4:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 24, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "5:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 32, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "6:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 40, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "7:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 48, 8\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "0:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 56, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "1:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 0, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "2:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 8, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "3:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 16, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "4:     lb      %1, 0(%2)\n"
+                       "       dins   %0, %1, 24, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "5:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 32, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "6:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 40, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "7:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 48, 8\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "0:     lb      %1, 0(%2)\n"
+                       "       dinsu    %0, %1, 56, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li     %3,%4\n"
+                       "       j      9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word  1b,8b\n"
+                       "       .word  2b,8b\n"
+                       "       .word  3b,8b\n"
+                       "       .word  4b,8b\n"
+                       "       .word  5b,8b\n"
+                       "       .word  6b,8b\n"
+                       "       .word  7b,8b\n"
+                       "       .word  0b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV));
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = rt;
+
+               MIPS_R2_STATS(loads);
+               break;
+
+       case sdl_op:
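+               /* SDL (64-bit only): store rt's most-significant bytes one at a time, down to the doubleword boundary. */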
+               if (config_enabled(CONFIG_32BIT)) {
+                   err = SIGILL;
+                   break;
+               }
+
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "       dextu   %1, %0, 56, 8\n"
+                       "1:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 48, 8\n"
+                       "2:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 40, 8\n"
+                       "3:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 32, 8\n"
+                       "4:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 24, 8\n"
+                       "5:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 16, 8\n"
+                       "6:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 8, 8\n"
+                       "7:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 0, 8\n"
+                       "0:     sb      %1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "       dextu   %1, %0, 56, 8\n"
+                       "1:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 48, 8\n"
+                       "2:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 40, 8\n"
+                       "3:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 32, 8\n"
+                       "4:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 24, 8\n"
+                       "5:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 16, 8\n"
+                       "6:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 8, 8\n"
+                       "7:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 0, 8\n"
+                       "0:     sb      %1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .word   5b,8b\n"
+                       "       .word   6b,8b\n"
+                       "       .word   7b,8b\n"
+                       "       .word   0b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV)
+                       : "memory");
+
+               MIPS_R2_STATS(stores);
+               break;
+
+       case sdr_op:
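+               /* SDR (64-bit only): store rt's least-significant bytes one at a time, up to the doubleword boundary. */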
+               if (config_enabled(CONFIG_32BIT)) {
+                   err = SIGILL;
+                   break;
+               }
+
+               rt = regs->regs[MIPSInst_RT(inst)];
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGSEGV;
+                       break;
+               }
+               __asm__ __volatile__(
+                       "       .set    push\n"
+                       "       .set    reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+                       "       dext    %1, %0, 0, 8\n"
+                       "1:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 8, 8\n"
+                       "2:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 16, 8\n"
+                       "3:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dext    %1, %0, 24, 8\n"
+                       "4:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 32, 8\n"
+                       "5:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 40, 8\n"
+                       "6:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 48, 8\n"
+                       "7:     sb      %1, 0(%2)\n"
+                       "       daddiu  %2, %2, 1\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       dextu   %1, %0, 56, 8\n"
+                       "0:     sb      %1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+                       "       dext    %1, %0, 0, 8\n"
+                       "1:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 8, 8\n"
+                       "2:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 16, 8\n"
+                       "3:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dext    %1, %0, 24, 8\n"
+                       "4:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 32, 8\n"
+                       "5:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 40, 8\n"
+                       "6:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 48, 8\n"
+                       "7:     sb      %1, 0(%2)\n"
+                       "       andi    %1, %2, 0x7\n"
+                       "       beq     $0, %1, 9f\n"
+                       "       daddiu  %2, %2, -1\n"
+                       "       dextu   %1, %0, 56, 8\n"
+                       "0:     sb      %1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+                       "9:\n"
+                       "       .insn\n"
+                       "       .section        .fixup,\"ax\"\n"
+                       "8:     li      %3,%4\n"
+                       "       j       9b\n"
+                       "       .previous\n"
+                       "       .section        __ex_table,\"a\"\n"
+                       "       .word   1b,8b\n"
+                       "       .word   2b,8b\n"
+                       "       .word   3b,8b\n"
+                       "       .word   4b,8b\n"
+                       "       .word   5b,8b\n"
+                       "       .word   6b,8b\n"
+                       "       .word   7b,8b\n"
+                       "       .word   0b,8b\n"
+                       "       .previous\n"
+                       "       .set    pop\n"
+                       : "+&r"(rt), "=&r"(rs),
+                         "+&r"(vaddr), "+&r"(err)
+                       : "i"(SIGSEGV)
+                       : "memory");
+
+               MIPS_R2_STATS(stores);
+
+               break;
+       case ll_op:
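+               /* LL: the address must be word-aligned and the CPU must provide Config5/LLB (see below). */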
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (vaddr & 0x3) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+               if (!access_ok(VERIFY_READ, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+
+               if (!cpu_has_rw_llb) {
+                       /*
+                        * An LL/SC block can't be safely emulated without
+                        * Config5/LLB availability. So it's probably time to
+                        * kill our process before things get any worse. This is
+                        * because Config5/LLB allows us to use ERETNC so that
+                        * the LLAddr/LLB bit is not cleared when we return from
+                        * an exception. MIPS R2 LL/SC instructions trap with an
+                        * RI exception so once we emulate them here, we return
+                        * back to userland with ERETNC. That preserves the
+                        * LLAddr/LLB so the subsequent SC instruction will
+                        * succeed preserving the atomic semantics of the LL/SC
+                        * block. Without that, there is no safe way to emulate
+                        * an LL/SC block in MIPSR2 userland.
+                        */
+                       pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+                       err = SIGKILL;
+                       break;
+               }
+
+               __asm__ __volatile__(
+                       "1:\n"
+                       "ll     %0, 0(%2)\n"
+                       "2:\n"
+                       ".insn\n"
+                       ".section        .fixup,\"ax\"\n"
+                       "3:\n"
+                       "li     %1, %3\n"
+                       "j      2b\n"
+                       ".previous\n"
+                       ".section        __ex_table,\"a\"\n"
+                       ".word  1b, 3b\n"
+                       ".previous\n"
+                       : "=&r"(res), "+&r"(err)
+                       : "r"(vaddr), "i"(SIGSEGV)
+                       : "memory");
+
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = res;
+               MIPS_R2_STATS(llsc);
+
+               break;
+
+       case sc_op:
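+               /* SC: the store-conditional result (success flag) is written back into rt. */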
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (vaddr & 0x3) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+               if (!access_ok(VERIFY_WRITE, vaddr, 4)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+
+               if (!cpu_has_rw_llb) {
+                       /*
+                        * An LL/SC block can't be safely emulated without
+                        * Config5/LLB availability. So it's probably time to
+                        * kill our process before things get any worse. This is
+                        * because Config5/LLB allows us to use ERETNC so that
+                        * the LLAddr/LLB bit is not cleared when we return from
+                        * an exception. MIPS R2 LL/SC instructions trap with an
+                        * RI exception so once we emulate them here, we return
+                        * back to userland with ERETNC. That preserves the
+                        * LLAddr/LLB so the subsequent SC instruction will
+                        * succeed preserving the atomic semantics of the LL/SC
+                        * block. Without that, there is no safe way to emulate
+                        * an LL/SC block in MIPSR2 userland.
+                        */
+                       pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+                       err = SIGKILL;
+                       break;
+               }
+
+               res = regs->regs[MIPSInst_RT(inst)];
+
+               __asm__ __volatile__(
+                       "1:\n"
+                       "sc     %0, 0(%2)\n"
+                       "2:\n"
+                       ".insn\n"
+                       ".section        .fixup,\"ax\"\n"
+                       "3:\n"
+                       "li     %1, %3\n"
+                       "j      2b\n"
+                       ".previous\n"
+                       ".section        __ex_table,\"a\"\n"
+                       ".word  1b, 3b\n"
+                       ".previous\n"
+                       : "+&r"(res), "+&r"(err)
+                       : "r"(vaddr), "i"(SIGSEGV));
+
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = res;
+
+               MIPS_R2_STATS(llsc);
+
+               break;
+
+       case lld_op:
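+               /* LLD (64-bit only): as LL, but the address must be doubleword-aligned. */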
+               if (config_enabled(CONFIG_32BIT)) {
+                   err = SIGILL;
+                   break;
+               }
+
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (vaddr & 0x7) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+               if (!access_ok(VERIFY_READ, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+
+               if (!cpu_has_rw_llb) {
+                       /*
+                        * An LL/SC block can't be safely emulated without
+                        * Config5/LLB availability. So it's probably time to
+                        * kill our process before things get any worse. This is
+                        * because Config5/LLB allows us to use ERETNC so that
+                        * the LLAddr/LLB bit is not cleared when we return from
+                        * an exception. MIPS R2 LL/SC instructions trap with an
+                        * RI exception so once we emulate them here, we return
+                        * back to userland with ERETNC. That preserves the
+                        * LLAddr/LLB so the subsequent SC instruction will
+                        * succeed preserving the atomic semantics of the LL/SC
+                        * block. Without that, there is no safe way to emulate
+                        * an LL/SC block in MIPSR2 userland.
+                        */
+                       pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+                       err = SIGKILL;
+                       break;
+               }
+
+               __asm__ __volatile__(
+                       "1:\n"
+                       "lld    %0, 0(%2)\n"
+                       "2:\n"
+                       ".insn\n"
+                       ".section        .fixup,\"ax\"\n"
+                       "3:\n"
+                       "li     %1, %3\n"
+                       "j      2b\n"
+                       ".previous\n"
+                       ".section        __ex_table,\"a\"\n"
+                       ".word  1b, 3b\n"
+                       ".previous\n"
+                       : "=&r"(res), "+&r"(err)
+                       : "r"(vaddr), "i"(SIGSEGV)
+                       : "memory");
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = res;
+
+               MIPS_R2_STATS(llsc);
+
+               break;
+
+       case scd_op:
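+               /* SCD (64-bit only): doubleword store-conditional; the result is written back into rt. */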
+               if (config_enabled(CONFIG_32BIT)) {
+                   err = SIGILL;
+                   break;
+               }
+
+               vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+               if (vaddr & 0x7) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+               if (!access_ok(VERIFY_WRITE, vaddr, 8)) {
+                       current->thread.cp0_baduaddr = vaddr;
+                       err = SIGBUS;
+                       break;
+               }
+
+               if (!cpu_has_rw_llb) {
+                       /*
+                        * An LL/SC block can't be safely emulated without
+                        * Config5/LLB availability. So it's probably time to
+                        * kill our process before things get any worse. This is
+                        * because Config5/LLB allows us to use ERETNC so that
+                        * the LLAddr/LLB bit is not cleared when we return from
+                        * an exception. MIPS R2 LL/SC instructions trap with an
+                        * RI exception so once we emulate them here, we return
+                        * back to userland with ERETNC. That preserves the
+                        * LLAddr/LLB so the subsequent SC instruction will
+                        * succeed preserving the atomic semantics of the LL/SC
+                        * block. Without that, there is no safe way to emulate
+                        * an LL/SC block in MIPSR2 userland.
+                        */
+                       pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+                       err = SIGKILL;
+                       break;
+               }
+
+               res = regs->regs[MIPSInst_RT(inst)];
+
+               __asm__ __volatile__(
+                       "1:\n"
+                       "scd    %0, 0(%2)\n"
+                       "2:\n"
+                       ".insn\n"
+                       ".section        .fixup,\"ax\"\n"
+                       "3:\n"
+                       "li     %1, %3\n"
+                       "j      2b\n"
+                       ".previous\n"
+                       ".section        __ex_table,\"a\"\n"
+                       ".word  1b, 3b\n"
+                       ".previous\n"
+                       : "+&r"(res), "+&r"(err)
+                       : "r"(vaddr), "i"(SIGSEGV));
+
+               if (MIPSInst_RT(inst) && !err)
+                       regs->regs[MIPSInst_RT(inst)] = res;
+
+               MIPS_R2_STATS(llsc);
+
+               break;
+       case pref_op:
+               /* PREF is only a hint; nothing to emulate, skip it */
+               break;
+       default:
+               err = SIGILL;
+       }
+
+       /*
+        * Let's not return to userland just yet. It's costly and
+        * it's likely we have more R2 instructions to emulate
+        */
+       if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
+               regs->cp0_cause &= ~CAUSEF_BD;
+               err = get_user(inst, (u32 __user *)regs->cp0_epc);
+               if (!err)
+                       goto repeat;
+
+               if (err < 0)
+                       err = SIGSEGV;
+       }
+
+       if (err && (err != SIGEMT)) {
+               regs->regs[31] = r31;
+               regs->cp0_epc = epc;
+       }
+
+       /* Likely a MIPS R6 compatible instruction */
+       if (pass && (err == SIGILL))
+               err = 0;
+
+       return err;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
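+/* Counters are per-CPU; this dumps the values for the CPU that services the read. */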
+static int mipsr2_stats_show(struct seq_file *s, void *unused)
+{
+
+       seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
+       seq_printf(s, "movs\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.movs),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
+       seq_printf(s, "hilo\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
+       seq_printf(s, "muls\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.muls),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
+       seq_printf(s, "divs\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.divs),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
+       seq_printf(s, "dsps\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
+       seq_printf(s, "bops\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.bops),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
+       seq_printf(s, "traps\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.traps),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
+       seq_printf(s, "fpus\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
+       seq_printf(s, "loads\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.loads),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
+       seq_printf(s, "stores\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.stores),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
+       seq_printf(s, "llsc\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
+       seq_printf(s, "dsemul\t\t%ld\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
+                  (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
+       seq_printf(s, "jr\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
+       seq_printf(s, "bltzl\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
+       seq_printf(s, "bgezl\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
+       seq_printf(s, "bltzll\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
+       seq_printf(s, "bgezll\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
+       seq_printf(s, "bltzal\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
+       seq_printf(s, "bgezal\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
+       seq_printf(s, "beql\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
+       seq_printf(s, "bnel\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
+       seq_printf(s, "blezl\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
+       seq_printf(s, "bgtzl\t\t%ld\n",
+                  (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));
+
+       return 0;
+}
+
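+/* Print the current counters, then reset them all; backs the r2_emul_stats_clear file. */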
+static int mipsr2_stats_clear_show(struct seq_file *s, void *unused)
+{
+       mipsr2_stats_show(s, unused);
+
+       __this_cpu_write((mipsr2emustats).movs, 0);
+       __this_cpu_write((mipsr2bdemustats).movs, 0);
+       __this_cpu_write((mipsr2emustats).hilo, 0);
+       __this_cpu_write((mipsr2bdemustats).hilo, 0);
+       __this_cpu_write((mipsr2emustats).muls, 0);
+       __this_cpu_write((mipsr2bdemustats).muls, 0);
+       __this_cpu_write((mipsr2emustats).divs, 0);
+       __this_cpu_write((mipsr2bdemustats).divs, 0);
+       __this_cpu_write((mipsr2emustats).dsps, 0);
+       __this_cpu_write((mipsr2bdemustats).dsps, 0);
+       __this_cpu_write((mipsr2emustats).bops, 0);
+       __this_cpu_write((mipsr2bdemustats).bops, 0);
+       __this_cpu_write((mipsr2emustats).traps, 0);
+       __this_cpu_write((mipsr2bdemustats).traps, 0);
+       __this_cpu_write((mipsr2emustats).fpus, 0);
+       __this_cpu_write((mipsr2bdemustats).fpus, 0);
+       __this_cpu_write((mipsr2emustats).loads, 0);
+       __this_cpu_write((mipsr2bdemustats).loads, 0);
+       __this_cpu_write((mipsr2emustats).stores, 0);
+       __this_cpu_write((mipsr2bdemustats).stores, 0);
+       __this_cpu_write((mipsr2emustats).llsc, 0);
+       __this_cpu_write((mipsr2bdemustats).llsc, 0);
+       __this_cpu_write((mipsr2emustats).dsemul, 0);
+       __this_cpu_write((mipsr2bdemustats).dsemul, 0);
+       __this_cpu_write((mipsr2bremustats).jrs, 0);
+       __this_cpu_write((mipsr2bremustats).bltzl, 0);
+       __this_cpu_write((mipsr2bremustats).bgezl, 0);
+       __this_cpu_write((mipsr2bremustats).bltzll, 0);
+       __this_cpu_write((mipsr2bremustats).bgezll, 0);
+       __this_cpu_write((mipsr2bremustats).bltzal, 0);
+       __this_cpu_write((mipsr2bremustats).bgezal, 0);
+       __this_cpu_write((mipsr2bremustats).beql, 0);
+       __this_cpu_write((mipsr2bremustats).bnel, 0);
+       __this_cpu_write((mipsr2bremustats).blezl, 0);
+       __this_cpu_write((mipsr2bremustats).bgtzl, 0);
+
+       return 0;
+}
+
+static int mipsr2_stats_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, mipsr2_stats_show, inode->i_private);
+}
+
+static int mipsr2_stats_clear_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, mipsr2_stats_clear_show, inode->i_private);
+}
+
+static const struct file_operations mipsr2_emul_fops = {
+       .open                   = mipsr2_stats_open,
+       .read                   = seq_read,
+       .llseek                 = seq_lseek,
+       .release                = single_release,
+};
+
+static const struct file_operations mipsr2_clear_fops = {
+       .open                   = mipsr2_stats_clear_open,
+       .read                   = seq_read,
+       .llseek                 = seq_lseek,
+       .release                = single_release,
+};
+
+
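+/* Create r2_emul_stats and r2_emul_stats_clear under the MIPS debugfs directory. */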
+static int __init mipsr2_init_debugfs(void)
+{
+       extern struct dentry    *mips_debugfs_dir;
+       struct dentry           *mipsr2_emul;
+
+       if (!mips_debugfs_dir)
+               return -ENODEV;
+
+       mipsr2_emul = debugfs_create_file("r2_emul_stats", S_IRUGO,
+                                         mips_debugfs_dir, NULL,
+                                         &mipsr2_emul_fops);
+       if (!mipsr2_emul)
+               return -ENOMEM;
+
+       mipsr2_emul = debugfs_create_file("r2_emul_stats_clear", S_IRUGO,
+                                         mips_debugfs_dir, NULL,
+                                         &mipsr2_clear_fops);
+       if (!mipsr2_emul)
+               return -ENOMEM;
+
+       return 0;
+}
+
+device_initcall(mipsr2_init_debugfs);
+
+#endif /* CONFIG_DEBUG_FS */
index 17eaf0cf760c60eb08fad9d666877dc5606ee61b..291af0b5c4828adaa22ff0fc065f37c6a094fd85 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/mm.h>
 #include <asm/uaccess.h>
 #include <asm/ftrace.h>
+#include <asm/fpu.h>
+#include <asm/msa.h>
 
 extern void *__bzero(void *__s, size_t __count);
 extern long __strncpy_from_kernel_nocheck_asm(char *__to,
@@ -31,6 +33,14 @@ extern long __strnlen_kernel_asm(const char *s);
 extern long __strnlen_user_nocheck_asm(const char *s);
 extern long __strnlen_user_asm(const char *s);
 
+/*
+ * Core architecture code
+ */
+EXPORT_SYMBOL_GPL(_save_fp);
+#ifdef CONFIG_CPU_HAS_MSA
+EXPORT_SYMBOL_GPL(_save_msa);
+#endif
+
 /*
  * String functions
  */
@@ -67,11 +77,13 @@ EXPORT_SYMBOL(__strnlen_kernel_asm);
 EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
 EXPORT_SYMBOL(__strnlen_user_asm);
 
+#ifndef CONFIG_CPU_MIPSR6
 EXPORT_SYMBOL(csum_partial);
 EXPORT_SYMBOL(csum_partial_copy_nocheck);
 EXPORT_SYMBOL(__csum_partial_copy_kernel);
 EXPORT_SYMBOL(__csum_partial_copy_to_user);
 EXPORT_SYMBOL(__csum_partial_copy_from_user);
+#endif
 
 EXPORT_SYMBOL(invalid_pte_table);
 #ifdef CONFIG_FUNCTION_TRACER
index f6547680c81cd7db68f5c77b19131a6f9fec05d4..423ae83af1fb7043a1daff5d06a079658446de5a 100644 (file)
        /*
         * check if we need to save FPU registers
         */
-       PTR_L   t3, TASK_THREAD_INFO(a0)
-       LONG_L  t0, TI_FLAGS(t3)
-       li      t1, _TIF_USEDFPU
-       and     t2, t0, t1
-       beqz    t2, 1f
-       nor     t1, zero, t1
-
-       and     t0, t0, t1
-       LONG_S  t0, TI_FLAGS(t3)
+       .set push
+       .set noreorder
+       beqz    a3, 1f
+        PTR_L  t3, TASK_THREAD_INFO(a0)
+       .set pop
 
        /*
         * clear saved user stack CU1 bit
        .set pop
 1:
 
-       /* check if we need to save COP2 registers */
-       PTR_L   t2, TASK_THREAD_INFO(a0)
-       LONG_L  t0, ST_OFF(t2)
-       bbit0   t0, 30, 1f
-
-       /* Disable COP2 in the stored process state */
-       li      t1, ST0_CU2
-       xor     t0, t1
-       LONG_S  t0, ST_OFF(t2)
-
-       /* Enable COP2 so we can save it */
-       mfc0    t0, CP0_STATUS
-       or      t0, t1
-       mtc0    t0, CP0_STATUS
-
-       /* Save COP2 */
-       daddu   a0, THREAD_CP2
-       jal octeon_cop2_save
-       dsubu   a0, THREAD_CP2
-
-       /* Disable COP2 now that we are done */
-       mfc0    t0, CP0_STATUS
-       li      t1, ST0_CU2
-       xor     t0, t1
-       mtc0    t0, CP0_STATUS
-
-1:
 #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
        /* Check if we need to store CVMSEG state */
-       mfc0    t0, $11,7       /* CvmMemCtl */
+       dmfc0   t0, $11,7       /* CvmMemCtl */
        bbit0   t0, 6, 3f       /* Is user access enabled? */
 
        /* Store the CVMSEG state */
        .set reorder
 
        /* Disable access to CVMSEG */
-       mfc0    t0, $11,7       /* CvmMemCtl */
+       dmfc0   t0, $11,7       /* CvmMemCtl */
        xori    t0, t0, 0x40    /* Bit 6 is CVMSEG user enable */
-       mtc0    t0, $11,7       /* CvmMemCtl */
+       dmtc0   t0, $11,7       /* CvmMemCtl */
 #endif
 3:
 
  * void octeon_cop2_save(struct octeon_cop2_state *a0)
  */
        .align  7
+       .set push
+       .set noreorder
        LEAF(octeon_cop2_save)
 
        dmfc0   t9, $9,7        /* CvmCtl register. */
        dmfc2   t2, 0x0200
        sd      t0, OCTEON_CP2_CRC_IV(a0)
        sd      t1, OCTEON_CP2_CRC_LENGTH(a0)
-       sd      t2, OCTEON_CP2_CRC_POLY(a0)
        /* Skip next instructions if CvmCtl[NODFA_CP2] set */
        bbit1   t9, 28, 1f
+        sd     t2, OCTEON_CP2_CRC_POLY(a0)
 
        /* Save the LLM state */
        dmfc2   t0, 0x0402
        dmfc2   t1, 0x040A
        sd      t0, OCTEON_CP2_LLM_DAT(a0)
-       sd      t1, OCTEON_CP2_LLM_DAT+8(a0)
 
 1:     bbit1   t9, 26, 3f      /* done if CvmCtl[NOCRYPTO] set */
+        sd     t1, OCTEON_CP2_LLM_DAT+8(a0)
 
        /* Save the COP2 crypto state */
        /* this part is mostly common to both pass 1 and later revisions */
        sd      t2, OCTEON_CP2_AES_KEY+16(a0)
        dmfc2   t2, 0x0101
        sd      t3, OCTEON_CP2_AES_KEY+24(a0)
-       mfc0    t3, $15,0       /* Get the processor ID register */
+       mfc0    v0, $15,0       /* Get the processor ID register */
        sd      t0, OCTEON_CP2_AES_KEYLEN(a0)
-       li      t0, 0x000d0000  /* This is the processor ID of Octeon Pass1 */
+       li      v1, 0x000d0000  /* This is the processor ID of Octeon Pass1 */
        sd      t1, OCTEON_CP2_AES_RESULT(a0)
-       sd      t2, OCTEON_CP2_AES_RESULT+8(a0)
        /* Skip to the Pass1 version of the remainder of the COP2 state */
-       beq     t3, t0, 2f
+       beq     v0, v1, 2f
+        sd     t2, OCTEON_CP2_AES_RESULT+8(a0)
 
        /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
        dmfc2   t1, 0x0240
        dmfc2   t2, 0x0241
+       ori     v1, v1, 0x9500 /* lowest OCTEON III PrId */
        dmfc2   t3, 0x0242
+       subu    v1, v0, v1 /* prid - lowest OCTEON III PrId */
        dmfc2   t0, 0x0243
        sd      t1, OCTEON_CP2_HSH_DATW(a0)
        dmfc2   t1, 0x0244
        sd      t1, OCTEON_CP2_GFM_MULT+8(a0)
        sd      t2, OCTEON_CP2_GFM_POLY(a0)
        sd      t3, OCTEON_CP2_GFM_RESULT(a0)
-       sd      t0, OCTEON_CP2_GFM_RESULT+8(a0)
+       bltz    v1, 4f
+        sd     t0, OCTEON_CP2_GFM_RESULT+8(a0)
+       /* OCTEON III SHA3 state */
+       dmfc2   t0, 0x024F
+       dmfc2   t1, 0x0050
+       sd      t0, OCTEON_CP2_SHA3(a0)
+       sd      t1, OCTEON_CP2_SHA3+8(a0)
+4:
        jr      ra
+        nop
 
 2:     /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
        dmfc2   t3, 0x0040
 
 3:     /* pass 1 or CvmCtl[NOCRYPTO] set */
        jr      ra
+        nop
        END(octeon_cop2_save)
+       .set pop
 
 /*
  * void octeon_cop2_restore(struct octeon_cop2_state *a0)
        ld      t2, OCTEON_CP2_AES_RESULT+8(a0)
        mfc0    t3, $15,0       /* Get the processor ID register */
        dmtc2   t0, 0x0110
-       li      t0, 0x000d0000  /* This is the processor ID of Octeon Pass1 */
+       li      v0, 0x000d0000  /* This is the processor ID of Octeon Pass1 */
        dmtc2   t1, 0x0100
-       bne     t0, t3, 3f      /* Skip the next stuff for non-pass1 */
+       bne     v0, t3, 3f      /* Skip the next stuff for non-pass1 */
         dmtc2  t2, 0x0101
 
        /* this code is specific for pass 1 */
 
 3:     /* this is post-pass1 code */
        ld      t2, OCTEON_CP2_HSH_DATW(a0)
+       ori     v0, v0, 0x9500 /* lowest OCTEON III PrId */
        ld      t0, OCTEON_CP2_HSH_DATW+8(a0)
        ld      t1, OCTEON_CP2_HSH_DATW+16(a0)
        dmtc2   t2, 0x0240
        dmtc2   t2, 0x0259
        ld      t2, OCTEON_CP2_GFM_RESULT+8(a0)
        dmtc2   t0, 0x025E
+       subu    v0, t3, v0 /* prid - lowest OCTEON III PrId */
        dmtc2   t1, 0x025A
-       dmtc2   t2, 0x025B
-
+       bltz    v0, done_restore
+        dmtc2  t2, 0x025B
+       /* OCTEON III SHA3 state */
+       ld      t0, OCTEON_CP2_SHA3(a0)
+       ld      t1, OCTEON_CP2_SHA3+8(a0)
+       dmtc2   t0, 0x0051
+       dmtc2   t1, 0x0050
 done_restore:
        jr      ra
         nop
@@ -450,18 +440,23 @@ done_restore:
  * void octeon_mult_save()
  * sp is assumed to point to a struct pt_regs
  *
- * NOTE: This is called in SAVE_SOME in stackframe.h. It can only
- *      safely modify k0 and k1.
+ * NOTE: This is called in SAVE_TEMP in stackframe.h. It can
+ *       safely modify v1, k0, k1, $10-$15, and $24. It will
+ *       be overwritten with a processor-specific version of the code.
  */
-       .align  7
+       .p2align 7
        .set push
        .set noreorder
        LEAF(octeon_mult_save)
-       dmfc0   k0, $9,7        /* CvmCtl register. */
-       bbit1   k0, 27, 1f      /* Skip CvmCtl[NOMUL] */
+       jr      ra
         nop
+       .space 30 * 4, 0
+octeon_mult_save_end:
+       EXPORT(octeon_mult_save_end)
+       END(octeon_mult_save)
 
-       /* Save the multiplier state */
+       LEAF(octeon_mult_save2)
+       /* Save the multiplier state, OCTEON II and earlier */
        v3mulu  k0, $0, $0
        v3mulu  k1, $0, $0
        sd      k0, PT_MTP(sp)        /* PT_MTP    has P0 */
@@ -476,44 +471,107 @@ done_restore:
        sd      k0, PT_MPL+8(sp)      /* PT_MPL+8  has MPL1 */
        jr      ra
         sd     k1, PT_MPL+16(sp)     /* PT_MPL+16 has MPL2 */
-
-1:     /* Resume here if CvmCtl[NOMUL] */
+octeon_mult_save2_end:
+       EXPORT(octeon_mult_save2_end)
+       END(octeon_mult_save2)
+
+       LEAF(octeon_mult_save3)
+       /* Save the multiplier state OCTEON III */
+       v3mulu  $10, $0, $0             /* read P0 */
+       v3mulu  $11, $0, $0             /* read P1 */
+       v3mulu  $12, $0, $0             /* read P2 */
+       sd      $10, PT_MTP+(0*8)(sp)   /* store P0 */
+       v3mulu  $10, $0, $0             /* read P3 */
+       sd      $11, PT_MTP+(1*8)(sp)   /*  store P1 */
+       v3mulu  $11, $0, $0             /* read P4 */
+       sd      $12, PT_MTP+(2*8)(sp)   /* store P2 */
+       ori     $13, $0, 1
+       v3mulu  $12, $0, $0             /* read P5 */
+       sd      $10, PT_MTP+(3*8)(sp)   /* store P3 */
+       v3mulu  $13, $13, $0            /* P4-P0 = MPL5-MPL1, $13 = MPL0 */
+       sd      $11, PT_MTP+(4*8)(sp)   /* store P4 */
+       v3mulu  $10, $0, $0             /* read MPL1 */
+       sd      $12, PT_MTP+(5*8)(sp)   /* store P5 */
+       v3mulu  $11, $0, $0             /* read MPL2 */
+       sd      $13, PT_MPL+(0*8)(sp)   /* store MPL0 */
+       v3mulu  $12, $0, $0             /* read MPL3 */
+       sd      $10, PT_MPL+(1*8)(sp)   /* store MPL1 */
+       v3mulu  $10, $0, $0             /* read MPL4 */
+       sd      $11, PT_MPL+(2*8)(sp)   /* store MPL2 */
+       v3mulu  $11, $0, $0             /* read MPL5 */
+       sd      $12, PT_MPL+(3*8)(sp)   /* store MPL3 */
+       sd      $10, PT_MPL+(4*8)(sp)   /* store MPL4 */
        jr      ra
-       END(octeon_mult_save)
+        sd     $11, PT_MPL+(5*8)(sp)   /* store MPL5 */
+octeon_mult_save3_end:
+       EXPORT(octeon_mult_save3_end)
+       END(octeon_mult_save3)
        .set pop
 
 /*
  * void octeon_mult_restore()
  * sp is assumed to point to a struct pt_regs
  *
- * NOTE: This is called in RESTORE_SOME in stackframe.h.
+ * NOTE: This is called in RESTORE_TEMP in stackframe.h.
  */
-       .align  7
+       .p2align 7
        .set push
        .set noreorder
        LEAF(octeon_mult_restore)
-       dmfc0   k1, $9,7                /* CvmCtl register. */
-       ld      v0, PT_MPL(sp)          /* MPL0 */
-       ld      v1, PT_MPL+8(sp)        /* MPL1 */
-       ld      k0, PT_MPL+16(sp)       /* MPL2 */
-       bbit1   k1, 27, 1f              /* Skip CvmCtl[NOMUL] */
-       /* Normally falls through, so no time wasted here */
-       nop
+       jr      ra
+        nop
+       .space 30 * 4, 0
+octeon_mult_restore_end:
+       EXPORT(octeon_mult_restore_end)
+       END(octeon_mult_restore)
 
+       LEAF(octeon_mult_restore2)
+       ld      v0, PT_MPL(sp)          /* MPL0 */
+       ld      v1, PT_MPL+8(sp)        /* MPL1 */
+       ld      k0, PT_MPL+16(sp)       /* MPL2 */
        /* Restore the multiplier state */
-       ld      k1, PT_MTP+16(sp)       /* P2 */
-       MTM0    v0                      /* MPL0 */
+       ld      k1, PT_MTP+16(sp)       /* P2 */
+       mtm0    v0                      /* MPL0 */
        ld      v0, PT_MTP+8(sp)        /* P1 */
-       MTM1    v1                      /* MPL1 */
-       ld      v1, PT_MTP(sp)          /* P0 */
-       MTM2    k0                      /* MPL2 */
-       MTP2    k1                      /* P2 */
-       MTP1    v0                      /* P1 */
+       mtm1    v1                      /* MPL1 */
+       ld      v1, PT_MTP(sp)          /* P0 */
+       mtm2    k0                      /* MPL2 */
+       mtp2    k1                      /* P2 */
+       mtp1    v0                      /* P1 */
        jr      ra
-        MTP0   v1                      /* P0 */
-
-1:     /* Resume here if CvmCtl[NOMUL] */
+        mtp0   v1                      /* P0 */
+octeon_mult_restore2_end:
+       EXPORT(octeon_mult_restore2_end)
+       END(octeon_mult_restore2)
+
+       LEAF(octeon_mult_restore3)
+       ld      $12, PT_MPL+(0*8)(sp)   /* read MPL0 */
+       ld      $13, PT_MPL+(3*8)(sp)   /* read MPL3 */
+       ld      $10, PT_MPL+(1*8)(sp)   /* read MPL1 */
+       ld      $11, PT_MPL+(4*8)(sp)   /* read MPL4 */
+       .word   0x718d0008
+       /* mtm0 $12, $13                   restore MPL0 and MPL3 */
+       ld      $12, PT_MPL+(2*8)(sp)   /* read MPL2 */
+       .word   0x714b000c
+       /* mtm1 $10, $11                   restore MPL1 and MPL4 */
+       ld      $13, PT_MPL+(5*8)(sp)   /* read MPL5 */
+       ld      $10, PT_MTP+(0*8)(sp)   /* read P0 */
+       ld      $11, PT_MTP+(3*8)(sp)   /* read P3 */
+       .word   0x718d000d
+       /* mtm2 $12, $13                   restore MPL2 and MPL5 */
+       ld      $12, PT_MTP+(1*8)(sp)   /* read P1 */
+       .word   0x714b0009
+       /* mtp0 $10, $11                   restore P0 and P3 */
+       ld      $13, PT_MTP+(4*8)(sp)   /* read P4 */
+       ld      $10, PT_MTP+(2*8)(sp)   /* read P2 */
+       ld      $11, PT_MTP+(5*8)(sp)   /* read P5 */
+       .word   0x718d000a
+       /* mtp1 $12, $13                   restore P1 and P4 */
        jr      ra
-        nop
-       END(octeon_mult_restore)
+       .word   0x714b000b
+       /* mtp2 $10, $11                   restore P2 and P5 */
+
+octeon_mult_restore3_end:
+       EXPORT(octeon_mult_restore3_end)
+       END(octeon_mult_restore3)
        .set pop
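
The generic octeon_mult_save/octeon_mult_restore stubs above now reduce to a bare "jr ra" plus 30 reserved words, and the NOTE says they get overwritten with a processor-specific body at boot; the exported *_end labels bound the OCTEON II (save2/restore2) and OCTEON III (save3/restore3) replacements. A minimal sketch of how such boot-time patching could look (hypothetical helper, not the platform code from this patch; it assumes the reserved stub space is large enough for either variant):

        #include <linux/string.h>
        #include <linux/types.h>
        #include <asm/cacheflush.h>

        extern char octeon_mult_save, octeon_mult_save_end;
        extern char octeon_mult_save2, octeon_mult_save2_end;
        extern char octeon_mult_save3, octeon_mult_save3_end;

        /* Copy the CPU-appropriate body over the generic stub, then make the
         * new instructions visible to instruction fetch. */
        static void octeon_patch_mult_save(bool is_octeon3)
        {
                char *src = is_octeon3 ? &octeon_mult_save3 : &octeon_mult_save2;
                char *end = is_octeon3 ? &octeon_mult_save3_end : &octeon_mult_save2_end;

                memcpy(&octeon_mult_save, src, end - src);
                flush_icache_range((unsigned long)&octeon_mult_save,
                                   (unsigned long)&octeon_mult_save + (end - src));
        }

The restore stub would be patched the same way using the octeon_mult_restore* symbols.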
index 097fc8d14e4225288733bac25f6ea17c6737d1c8..130af7d26a9c5de2fe2ec5fdd5969d9bd4a5f42a 100644 (file)
@@ -82,7 +82,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                seq_printf(m, "]\n");
        }
 
-       seq_printf(m, "isa\t\t\t: mips1");
+       seq_printf(m, "isa\t\t\t:");
+       if (cpu_has_mips_r1)
+               seq_printf(m, " mips1");
        if (cpu_has_mips_2)
                seq_printf(m, "%s", " mips2");
        if (cpu_has_mips_3)
@@ -95,10 +97,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                seq_printf(m, "%s", " mips32r1");
        if (cpu_has_mips32r2)
                seq_printf(m, "%s", " mips32r2");
+       if (cpu_has_mips32r6)
+               seq_printf(m, "%s", " mips32r6");
        if (cpu_has_mips64r1)
                seq_printf(m, "%s", " mips64r1");
        if (cpu_has_mips64r2)
                seq_printf(m, "%s", " mips64r2");
+       if (cpu_has_mips64r6)
+               seq_printf(m, "%s", " mips64r6");
        seq_printf(m, "\n");
 
        seq_printf(m, "ASEs implemented\t:");
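
With the unconditional "mips1" replaced by per-ISA checks, a Release 6 core no longer advertises ISAs it does not implement. On a hypothetical MIPS64 R6 CPU the resulting /proc/cpuinfo line would read roughly:

        isa                     : mips32r6 mips64r6

while a MIPS64 R2 part keeps listing the full backward-compatible set (mips1 through mips64r2).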
index 85bff5d513e5b42ae483e414c14a4a844793b9a1..bf85cc180d9105b7c605d049ccfdb352c76cc066 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/completion.h>
 #include <linux/kallsyms.h>
 #include <linux/random.h>
+#include <linux/prctl.h>
 
 #include <asm/asm.h>
 #include <asm/bootinfo.h>
@@ -562,3 +563,98 @@ void arch_trigger_all_cpu_backtrace(bool include_self)
 {
        smp_call_function(arch_dump_stack, NULL, 1);
 }
+
+int mips_get_process_fp_mode(struct task_struct *task)
+{
+       int value = 0;
+
+       if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
+               value |= PR_FP_MODE_FR;
+       if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
+               value |= PR_FP_MODE_FRE;
+
+       return value;
+}
+
+int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+{
+       const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
+       unsigned long switch_count;
+       struct task_struct *t;
+
+       /* Check the value is valid */
+       if (value & ~known_bits)
+               return -EOPNOTSUPP;
+
+       /* Avoid inadvertently triggering emulation */
+       if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
+           !(current_cpu_data.fpu_id & MIPS_FPIR_F64))
+               return -EOPNOTSUPP;
+       if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
+               return -EOPNOTSUPP;
+
+       /* FR = 0 not supported in MIPS R6 */
+       if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
+               return -EOPNOTSUPP;
+
+       /* Save FP & vector context, then disable FPU & MSA */
+       if (task->signal == current->signal)
+               lose_fpu(1);
+
+       /* Prevent any threads from obtaining live FP context */
+       atomic_set(&task->mm->context.fp_mode_switching, 1);
+       smp_mb__after_atomic();
+
+       /*
+        * If there are multiple online CPUs then wait until all threads whose
+        * FP mode is about to change have been context switched. This approach
+        * allows us to only worry about whether an FP mode switch is in
+        * progress when FP is first used in a task's time slice. Pretty much
+        * all of the mode switch overhead can thus be confined to cases where
+        * mode switches are actually occurring. That is, to here. However, for
+        * the thread performing the mode switch it may take a while...
+        */
+       if (num_online_cpus() > 1) {
+               spin_lock_irq(&task->sighand->siglock);
+
+               for_each_thread(task, t) {
+                       if (t == current)
+                               continue;
+
+                       switch_count = t->nvcsw + t->nivcsw;
+
+                       do {
+                               spin_unlock_irq(&task->sighand->siglock);
+                               cond_resched();
+                               spin_lock_irq(&task->sighand->siglock);
+                       } while ((t->nvcsw + t->nivcsw) == switch_count);
+               }
+
+               spin_unlock_irq(&task->sighand->siglock);
+       }
+
+       /*
+        * There are now no threads of the process with live FP context, so it
+        * is safe to proceed with the FP mode switch.
+        */
+       for_each_thread(task, t) {
+               /* Update desired FP register width */
+               if (value & PR_FP_MODE_FR) {
+                       clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
+               } else {
+                       set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
+                       clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
+               }
+
+               /* Update desired FP single layout */
+               if (value & PR_FP_MODE_FRE)
+                       set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
+               else
+                       clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
+       }
+
+       /* Allow threads to use FP again */
+       atomic_set(&task->mm->context.fp_mode_switching, 0);
+
+       return 0;
+}
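
mips_get_process_fp_mode() and mips_set_process_fp_mode() back the new PR_GET_FP_MODE/PR_SET_FP_MODE prctl() operations. A small hypothetical userspace test (not part of this patch; the PR_* fallback values below are the ones this series adds to the uapi header, duplicated only for old libc headers):

        #include <stdio.h>
        #include <sys/prctl.h>

        #ifndef PR_SET_FP_MODE
        # define PR_SET_FP_MODE 45
        # define PR_GET_FP_MODE 46
        # define PR_FP_MODE_FR  (1 << 0)        /* 64-bit FP registers (FR=1) */
        # define PR_FP_MODE_FRE (1 << 1)        /* 32-bit compatibility mode (FRE=1) */
        #endif

        int main(void)
        {
                int old = prctl(PR_GET_FP_MODE);

                /* Ask for FR=1; per mips_set_process_fp_mode() this fails with
                 * EOPNOTSUPP when the FPU lacks 64-bit register support. */
                if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR) != 0)
                        perror("PR_SET_FP_MODE");

                printf("fp mode: was %#x, now %#x\n", old, prctl(PR_GET_FP_MODE));
                return 0;
        }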
index 6c160c67984c014e53a3a068698fdef58a9b9345..676c5030a953bf9cca5ad038a7526d3b94ce372d 100644 (file)
@@ -34,7 +34,7 @@
        .endm
 
        .set    noreorder
-       .set    arch=r4000
+       .set    MIPS_ISA_ARCH_LEVEL_RAW
 
 LEAF(_save_fp_context)
        .set    push
@@ -42,7 +42,8 @@ LEAF(_save_fp_context)
        cfc1    t1, fcr31
        .set    pop
 
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+               defined(CONFIG_CPU_MIPS32_R6)
        .set    push
        SET_HARDFLOAT
 #ifdef CONFIG_CPU_MIPS32_R2
@@ -105,10 +106,12 @@ LEAF(_save_fp_context32)
        SET_HARDFLOAT
        cfc1    t1, fcr31
 
+#ifndef CONFIG_CPU_MIPS64_R6
        mfc0    t0, CP0_STATUS
        sll     t0, t0, 5
        bgez    t0, 1f                  # skip storing odd if FR=0
         nop
+#endif
 
        /* Store the 16 odd double precision registers */
        EX      sdc1 $f1, SC32_FPREGS+8(a0)
@@ -163,7 +166,8 @@ LEAF(_save_fp_context32)
 LEAF(_restore_fp_context)
        EX      lw t1, SC_FPC_CSR(a0)
 
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)  || \
+               defined(CONFIG_CPU_MIPS32_R6)
        .set    push
        SET_HARDFLOAT
 #ifdef CONFIG_CPU_MIPS32_R2
@@ -223,10 +227,12 @@ LEAF(_restore_fp_context32)
        SET_HARDFLOAT
        EX      lw t1, SC32_FPC_CSR(a0)
 
+#ifndef CONFIG_CPU_MIPS64_R6
        mfc0    t0, CP0_STATUS
        sll     t0, t0, 5
        bgez    t0, 1f                  # skip loading odd if FR=0
         nop
+#endif
 
        EX      ldc1 $f1, SC32_FPREGS+8(a0)
        EX      ldc1 $f3, SC32_FPREGS+24(a0)
index 64591e671878f41d9c4d0551806783b65578eb23..3b1a36f13a7dd915c235f11e073c805264f90960 100644 (file)
  * Save a thread's fp context.
  */
 LEAF(_save_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+               defined(CONFIG_CPU_MIPS32_R6)
        mfc0    t0, CP0_STATUS
 #endif
        fpu_save_double a0 t0 t1                # clobbers t1
@@ -126,7 +127,8 @@ LEAF(_save_fp)
  * Restore a thread's fp context.
  */
 LEAF(_restore_fp)
-#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) || \
+               defined(CONFIG_CPU_MIPS32_R6)
        mfc0    t0, CP0_STATUS
 #endif
        fpu_restore_double a0 t0 t1             # clobbers t1
@@ -240,9 +242,9 @@ LEAF(_init_fpu)
        mtc1    t1, $f30
        mtc1    t1, $f31
 
-#ifdef CONFIG_CPU_MIPS32_R2
+#if defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS32_R6)
        .set    push
-       .set    mips32r2
+       .set    MIPS_ISA_LEVEL_RAW
        .set    fp=64
        sll     t0, t0, 5                       # is Status.FR set?
        bgez    t0, 1f                          # no: skip setting upper 32b
@@ -280,9 +282,9 @@ LEAF(_init_fpu)
        mthc1   t1, $f30
        mthc1   t1, $f31
 1:     .set    pop
-#endif /* CONFIG_CPU_MIPS32_R2 */
+#endif /* CONFIG_CPU_MIPS32_R2 || CONFIG_CPU_MIPS32_R6 */
 #else
-       .set    arch=r4000
+       .set    MIPS_ISA_ARCH_LEVEL_RAW
        dmtc1   t1, $f0
        dmtc1   t1, $f2
        dmtc1   t1, $f4
index 67f2495def1cd18615210c32e2cc111ea66c8fe0..d1168d7c31e8ef37c51568b93cf676e9a6c3ef81 100644 (file)
@@ -208,6 +208,7 @@ void spram_config(void)
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
        case CPU_P5600:
+       case CPU_QEMU_GENERIC:
                config0 = read_c0_config();
                /* FIXME: addresses are Malta specific */
                if (config0 & (1<<24)) {
index 604b558809c4c4474bfd953507ca7e54af5e9eca..53a7ef9a8f320c5b14c5abb2d3e1518c0ad7f756 100644 (file)
@@ -136,7 +136,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
                : "memory");
        } else if (cpu_has_llsc) {
                __asm__ __volatile__ (
-               "       .set    arch=r4000                              \n"
+               "       .set    "MIPS_ISA_ARCH_LEVEL"                   \n"
                "       li      %[err], 0                               \n"
                "1:     ll      %[old], (%[addr])                       \n"
                "       move    %[tmp], %[new]                          \n"
index c3b41e24c05a47337509b9579d5b1302ba6f6e80..33984c04b60b710516f1b0bfb88aa52aaa04629f 100644 (file)
@@ -46,6 +46,7 @@
 #include <asm/fpu.h>
 #include <asm/fpu_emulator.h>
 #include <asm/idle.h>
+#include <asm/mips-r2-to-r6-emul.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
 #include <asm/module.h>
@@ -837,7 +838,7 @@ out:
        exception_exit(prev_state);
 }
 
-static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
        const char *str)
 {
        siginfo_t info;
@@ -1027,7 +1028,34 @@ asmlinkage void do_ri(struct pt_regs *regs)
        unsigned int opcode = 0;
        int status = -1;
 
+       /*
+        * Avoid any kernel code. Just emulate the R2 instruction
+        * as quickly as possible.
+        */
+       if (mipsr2_emulation && cpu_has_mips_r6 &&
+           likely(user_mode(regs))) {
+               if (likely(get_user(opcode, epc) >= 0)) {
+                       status = mipsr2_decoder(regs, opcode);
+                       switch (status) {
+                       case 0:
+                       case SIGEMT:
+                               task_thread_info(current)->r2_emul_return = 1;
+                               return;
+                       case SIGILL:
+                               goto no_r2_instr;
+                       default:
+                               process_fpemu_return(status,
+                                                    &current->thread.cp0_baduaddr);
+                               task_thread_info(current)->r2_emul_return = 1;
+                               return;
+                       }
+               }
+       }
+
+no_r2_instr:
+
        prev_state = exception_enter();
+
        if (notify_die(DIE_RI, "RI Fault", regs, 0, regs_to_trapnr(regs),
                       SIGILL) == NOTIFY_STOP)
                goto out;
@@ -1134,10 +1162,29 @@ static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
        return NOTIFY_OK;
 }
 
+static int wait_on_fp_mode_switch(atomic_t *p)
+{
+       /*
+        * The FP mode for this task is currently being switched. That may
+        * involve modifications to the format of this task's FP context, which
+        * make it unsafe to proceed with execution for the moment. Instead,
+        * schedule some other task.
+        */
+       schedule();
+       return 0;
+}
+
 static int enable_restore_fp_context(int msa)
 {
        int err, was_fpu_owner, prior_msa;
 
+       /*
+        * If an FP mode switch is currently underway, wait for it to
+        * complete before proceeding.
+        */
+       wait_on_atomic_t(&current->mm->context.fp_mode_switching,
+                        wait_on_fp_mode_switch, TASK_KILLABLE);
+
        if (!used_math()) {
                /* First time FP context user. */
                preempt_disable();
@@ -1541,6 +1588,7 @@ static inline void parity_protection_init(void)
        case CPU_INTERAPTIV:
        case CPU_PROAPTIV:
        case CPU_P5600:
+       case CPU_QEMU_GENERIC:
                {
 #define ERRCTL_PE      0x80000000
 #define ERRCTL_L2P     0x00800000
@@ -1630,7 +1678,7 @@ asmlinkage void cache_parity_error(void)
        printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
               reg_val & (1<<30) ? "secondary" : "primary",
               reg_val & (1<<31) ? "data" : "insn");
-       if (cpu_has_mips_r2 &&
+       if ((cpu_has_mips_r2_r6) &&
            ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
                pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
                        reg_val & (1<<29) ? "ED " : "",
@@ -1670,7 +1718,7 @@ asmlinkage void do_ftlb(void)
        unsigned int reg_val;
 
        /* For the moment, report the problem and hang. */
-       if (cpu_has_mips_r2 &&
+       if ((cpu_has_mips_r2_r6) &&
            ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
                pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
                       read_c0_ecc());
@@ -1959,7 +2007,7 @@ static void configure_hwrena(void)
 {
        unsigned int hwrena = cpu_hwrena_impl_bits;
 
-       if (cpu_has_mips_r2)
+       if (cpu_has_mips_r2_r6)
                hwrena |= 0x0000000f;
 
        if (!noulri && cpu_has_userlocal)
@@ -2003,7 +2051,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
         *  o read IntCtl.IPTI to determine the timer interrupt
         *  o read IntCtl.IPPCI to determine the performance counter interrupt
         */
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2_r6) {
                cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
                cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
                cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -2094,7 +2142,7 @@ void __init trap_init(void)
 #else
         ebase = CKSEG0;
 #endif
-               if (cpu_has_mips_r2)
+               if (cpu_has_mips_r2_r6)
                        ebase += (read_c0_ebase() & 0x3ffff000);
        }
 
index e11906dff8850fc277b14985eafdbbb4d2c3ece9..bbb69695a0a10765f4c6bdf300da304667c021f5 100644 (file)
@@ -129,6 +129,7 @@ extern void show_registers(struct pt_regs *regs);
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadW(addr, value, res)   \
                __asm__ __volatile__ (                      \
                        "1:\t"user_lwl("%0", "(%2)")"\n"    \
@@ -146,6 +147,39 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl instruction */
+#define     LoadW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n"                      \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_lb("%0", "0(%2)")"\n\t"    \
+                       "2:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:"user_lbu("$1", "3(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #define     LoadHWU(addr, value, res) \
                __asm__ __volatile__ (                      \
@@ -169,6 +203,7 @@ extern void show_registers(struct pt_regs *regs);
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadWU(addr, value, res)  \
                __asm__ __volatile__ (                      \
                        "1:\t"user_lwl("%0", "(%2)")"\n"    \
@@ -206,6 +241,87 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl and ldl instructions */
+#define     LoadWU(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_lbu("%0", "0(%2)")"\n\t"   \
+                       "2:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:"user_lbu("$1", "3(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:lb\t%0, 0(%2)\n\t"               \
+                       "2:lbu\t $1, 1(%2)\n\t"             \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "3:lbu\t$1, 2(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "4:lbu\t$1, 3(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "5:lbu\t$1, 4(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "6:lbu\t$1, 5(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "7:lbu\t$1, 6(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "8:lbu\t$1, 7(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n\t"                     \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       STR(PTR)"\t5b, 11b\n\t"             \
+                       STR(PTR)"\t6b, 11b\n\t"             \
+                       STR(PTR)"\t7b, 11b\n\t"             \
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 
 #define     StoreHW(addr, value, res) \
                __asm__ __volatile__ (                      \
@@ -228,6 +344,7 @@ extern void show_registers(struct pt_regs *regs);
                        : "=r" (res)                        \
                        : "r" (value), "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     StoreW(addr, value, res)  \
                __asm__ __volatile__ (                      \
                        "1:\t"user_swl("%1", "(%2)")"\n"    \
@@ -263,9 +380,82 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                : "=r" (res)                                \
                : "r" (value), "r" (addr), "i" (-EFAULT));
-#endif
+#else
+/* MIPSR6 has no swl and sdl instructions */
+#define     StoreW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_sb("%1", "3(%2)")"\n\t"    \
+                       "srl\t$1, %1, 0x8\n\t"              \
+                       "2:"user_sb("$1", "2(%2)")"\n\t"    \
+                       "srl\t$1, $1,  0x8\n\t"             \
+                       "3:"user_sb("$1", "1(%2)")"\n\t"    \
+                       "srl\t$1, $1, 0x8\n\t"              \
+                       "4:"user_sb("$1", "0(%2)")"\n\t"    \
+                       ".set\tpop\n\t"                     \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+
+#define     StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:sb\t%1, 7(%2)\n\t"               \
+                       "dsrl\t$1, %1, 0x8\n\t"             \
+                       "2:sb\t$1, 6(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "3:sb\t$1, 5(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "4:sb\t$1, 4(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "5:sb\t$1, 3(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "6:sb\t$1, 2(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "7:sb\t$1, 1(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "8:sb\t$1, 0(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       ".set\tpop\n\t"                     \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       STR(PTR)"\t5b, 11b\n\t"             \
+                       STR(PTR)"\t6b, 11b\n\t"             \
+                       STR(PTR)"\t7b, 11b\n\t"             \
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+#endif /* CONFIG_CPU_MIPSR6 */
+
+#else /* __BIG_ENDIAN */
 
-#ifdef __LITTLE_ENDIAN
 #define     LoadHW(addr, value, res)  \
                __asm__ __volatile__ (".set\tnoat\n"        \
                        "1:\t"user_lb("%0", "1(%2)")"\n"    \
@@ -286,6 +476,7 @@ extern void show_registers(struct pt_regs *regs);
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadW(addr, value, res)   \
                __asm__ __volatile__ (                      \
                        "1:\t"user_lwl("%0", "3(%2)")"\n"   \
@@ -303,6 +494,40 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl instruction */
+#define     LoadW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n"                      \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_lb("%0", "3(%2)")"\n\t"    \
+                       "2:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:"user_lbu("$1", "0(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
+
 
 #define     LoadHWU(addr, value, res) \
                __asm__ __volatile__ (                      \
@@ -326,6 +551,7 @@ extern void show_registers(struct pt_regs *regs);
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
 
+#ifndef CONFIG_CPU_MIPSR6
 #define     LoadWU(addr, value, res)  \
                __asm__ __volatile__ (                      \
                        "1:\t"user_lwl("%0", "3(%2)")"\n"   \
@@ -363,6 +589,86 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                        : "=&r" (value), "=r" (res)         \
                        : "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no lwl and ldl instructions */
+#define     LoadWU(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_lbu("%0", "3(%2)")"\n\t"   \
+                       "2:"user_lbu("$1", "2(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "3:"user_lbu("$1", "1(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "4:"user_lbu("$1", "0(%2)")"\n\t"   \
+                       "sll\t%0, 0x8\n\t"                  \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+
+#define     LoadDW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:lb\t%0, 7(%2)\n\t"               \
+                       "2:lbu\t$1, 6(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "3:lbu\t$1, 5(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "4:lbu\t$1, 4(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "5:lbu\t$1, 3(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "6:lbu\t$1, 2(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "7:lbu\t$1, 1(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "8:lbu\t$1, 0(%2)\n\t"              \
+                       "dsll\t%0, 0x8\n\t"                 \
+                       "or\t%0, $1\n\t"                    \
+                       "li\t%1, 0\n"                       \
+                       ".set\tpop\n\t"                     \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%1, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       STR(PTR)"\t5b, 11b\n\t"             \
+                       STR(PTR)"\t6b, 11b\n\t"             \
+                       STR(PTR)"\t7b, 11b\n\t"             \
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+                       : "=&r" (value), "=r" (res)         \
+                       : "r" (addr), "i" (-EFAULT));
+#endif /* CONFIG_CPU_MIPSR6 */
 
 #define     StoreHW(addr, value, res) \
                __asm__ __volatile__ (                      \
@@ -384,7 +690,7 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                        : "=r" (res)                        \
                        : "r" (value), "r" (addr), "i" (-EFAULT));
-
+#ifndef CONFIG_CPU_MIPSR6
 #define     StoreW(addr, value, res)  \
                __asm__ __volatile__ (                      \
                        "1:\t"user_swl("%1", "3(%2)")"\n"   \
@@ -420,6 +726,79 @@ extern void show_registers(struct pt_regs *regs);
                        ".previous"                         \
                : "=r" (res)                                \
                : "r" (value), "r" (addr), "i" (-EFAULT));
+#else
+/* MIPSR6 has no swl and sdl instructions */
+#define     StoreW(addr, value, res)  \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:"user_sb("%1", "0(%2)")"\n\t"    \
+                       "srl\t$1, %1, 0x8\n\t"              \
+                       "2:"user_sb("$1", "1(%2)")"\n\t"    \
+                       "srl\t$1, $1,  0x8\n\t"             \
+                       "3:"user_sb("$1", "2(%2)")"\n\t"    \
+                       "srl\t$1, $1, 0x8\n\t"              \
+                       "4:"user_sb("$1", "3(%2)")"\n\t"    \
+                       ".set\tpop\n\t"                     \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+
+#define     StoreDW(addr, value, res) \
+               __asm__ __volatile__ (                      \
+                       ".set\tpush\n\t"                    \
+                       ".set\tnoat\n\t"                    \
+                       "1:sb\t%1, 0(%2)\n\t"               \
+                       "dsrl\t$1, %1, 0x8\n\t"             \
+                       "2:sb\t$1, 1(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "3:sb\t$1, 2(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "4:sb\t$1, 3(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "5:sb\t$1, 4(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "6:sb\t$1, 5(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "7:sb\t$1, 6(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       "8:sb\t$1, 7(%2)\n\t"               \
+                       "dsrl\t$1, $1, 0x8\n\t"             \
+                       ".set\tpop\n\t"                     \
+                       "li\t%0, 0\n"                       \
+                       "10:\n\t"                           \
+                       ".insn\n\t"                         \
+                       ".section\t.fixup,\"ax\"\n\t"       \
+                       "11:\tli\t%0, %3\n\t"               \
+                       "j\t10b\n\t"                        \
+                       ".previous\n\t"                     \
+                       ".section\t__ex_table,\"a\"\n\t"    \
+                       STR(PTR)"\t1b, 11b\n\t"             \
+                       STR(PTR)"\t2b, 11b\n\t"             \
+                       STR(PTR)"\t3b, 11b\n\t"             \
+                       STR(PTR)"\t4b, 11b\n\t"             \
+                       STR(PTR)"\t5b, 11b\n\t"             \
+                       STR(PTR)"\t6b, 11b\n\t"             \
+                       STR(PTR)"\t7b, 11b\n\t"             \
+                       STR(PTR)"\t8b, 11b\n\t"             \
+                       ".previous"                         \
+               : "=&r" (res)                               \
+               : "r" (value), "r" (addr), "i" (-EFAULT)    \
+               : "memory");
+#endif /* CONFIG_CPU_MIPSR6 */
 #endif
 
 static void emulate_load_store_insn(struct pt_regs *regs,
@@ -703,10 +1082,13 @@ static void emulate_load_store_insn(struct pt_regs *regs,
                        break;
                return;
 
+#ifndef CONFIG_CPU_MIPSR6
        /*
         * COP2 is available to implementor for application specific use.
         * It's up to applications to register a notifier chain and do
         * whatever they have to do, including possible sending of signals.
+        *
+        * These opcodes have been reallocated in MIPS Release 6.
         */
        case lwc2_op:
                cu2_notifier_call_chain(CU2_LWC2_OP, regs);
@@ -723,7 +1105,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
        case sdc2_op:
                cu2_notifier_call_chain(CU2_SDC2_OP, regs);
                break;
-
+#endif
        default:
                /*
                  * Pheeee...  We encountered a yet unknown instruction or
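
Since Release 6 drops lwl/lwr, swl/swr and their 64-bit counterparts, the MIPSR6 variants of LoadW/LoadWU/LoadDW and StoreW/StoreDW above assemble the value one byte at a time with lb/lbu/sb plus shifts. The big-endian LoadW path corresponds roughly to the following C (illustrative sketch only; the real macros also wire every access into the __ex_table fixup machinery):

        #include <stdint.h>

        /* Big-endian 32-bit load from a possibly unaligned address, built from
         * byte accesses the way the MIPSR6 LoadW macro does: the first byte is
         * sign-extended (lb), the rest are zero-extended (lbu), shifted and
         * OR'd into place. */
        static int32_t load_w_unaligned_be(const uint8_t *p)
        {
                int32_t v = (int8_t)p[0];

                v = (v << 8) | p[1];
                v = (v << 8) | p[2];
                v = (v << 8) | p[3];
                return v;
        }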
index eeddc58802e11a57595ee7d123e0538577af0c67..1e9e900cd3c382a4a8c3bce2ab7ca2987a1acfcd 100644 (file)
@@ -8,6 +8,7 @@ lib-y   += bitops.o csum_partial.o delay.o memcpy.o memset.o \
 
 obj-y                  += iomap.o
 obj-$(CONFIG_PCI)      += iomap-pci.o
+lib-$(CONFIG_GENERIC_CSUM)     := $(filter-out csum_partial.o, $(lib-y))
 
 obj-$(CONFIG_CPU_GENERIC_DUMP_TLB) += dump_tlb.o
 obj-$(CONFIG_CPU_R3000)                += r3k_dump_tlb.o
index 5d3238af9b5cc551ecb0ca9670b242b62f181a73..9245e1705e691124ad3f4eb563cfe680184c30cf 100644 (file)
         and    t0, src, ADDRMASK
        PREFS(  0, 2*32(src) )
        PREFD(  1, 2*32(dst) )
+#ifndef CONFIG_CPU_MIPSR6
        bnez    t1, .Ldst_unaligned\@
         nop
        bnez    t0, .Lsrc_unaligned_dst_aligned\@
+#else
+       or      t0, t0, t1
+       bnez    t0, .Lcopy_unaligned_bytes\@
+#endif
        /*
         * use delay slot for fall-through
         * src and dst are aligned; need to compute rem
        bne     rem, len, 1b
        .set    noreorder
 
+#ifndef CONFIG_CPU_MIPSR6
        /*
         * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
         * A loop would do only a byte at a time with possible branch
        bne     len, rem, 1b
        .set    noreorder
 
+#endif /* !CONFIG_CPU_MIPSR6 */
 .Lcopy_bytes_checklen\@:
        beqz    len, .Ldone\@
         nop
 .Ldone\@:
        jr      ra
         nop
+
+#ifdef CONFIG_CPU_MIPSR6
+.Lcopy_unaligned_bytes\@:
+1:
+       COPY_BYTE(0)
+       COPY_BYTE(1)
+       COPY_BYTE(2)
+       COPY_BYTE(3)
+       COPY_BYTE(4)
+       COPY_BYTE(5)
+       COPY_BYTE(6)
+       COPY_BYTE(7)
+       ADD     src, src, 8
+       b       1b
+        ADD    dst, dst, 8
+#endif /* CONFIG_CPU_MIPSR6 */
        .if __memcpy == 1
        END(memcpy)
        .set __memcpy, 0
index c8fe6b1968fb313dca7e9bb307ec89b159746730..b8e63fd0037547cb9939907696da7ae55d699a95 100644 (file)
        .set            at
 #endif
 
+#ifndef CONFIG_CPU_MIPSR6
        R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
        EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
        PTR_SUBU        a0, t0                  /* long align ptr */
        PTR_ADDU        a2, t0                  /* correct size */
 
+#else /* CONFIG_CPU_MIPSR6 */
+#define STORE_BYTE(N)                          \
+       EX(sb, a1, N(a0), .Lbyte_fixup\@);      \
+       beqz            t0, 0f;                 \
+       PTR_ADDU        t0, 1;
+
+       PTR_ADDU        a2, t0                  /* correct size */
+       PTR_ADDU        t0, 1
+       STORE_BYTE(0)
+       STORE_BYTE(1)
+#if LONGSIZE == 4
+       EX(sb, a1, 2(a0), .Lbyte_fixup\@)
+#else
+       STORE_BYTE(2)
+       STORE_BYTE(3)
+       STORE_BYTE(4)
+       STORE_BYTE(5)
+       EX(sb, a1, 6(a0), .Lbyte_fixup\@)
+#endif
+0:
+       ori             a0, STORMASK
+       xori            a0, STORMASK
+       PTR_ADDIU       a0, STORSIZE
+#endif /* CONFIG_CPU_MIPSR6 */
 1:     ori             t1, a2, 0x3f            /* # of full blocks */
        xori            t1, 0x3f
        beqz            t1, .Lmemset_partial\@  /* no block to fill */
        andi            a2, STORMASK            /* At most one long to go */
 
        beqz            a2, 1f
+#ifndef CONFIG_CPU_MIPSR6
        PTR_ADDU        a0, a2                  /* What's left */
        R10KCBARRIER(0(ra))
 #ifdef __MIPSEB__
 #else
        EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
 #endif
+#else
+       PTR_SUBU        t0, $0, a2
+       PTR_ADDIU       t0, 1
+       STORE_BYTE(0)
+       STORE_BYTE(1)
+#if LONGSIZE == 4
+       EX(sb, a1, 2(a0), .Lbyte_fixup\@)
+#else
+       STORE_BYTE(2)
+       STORE_BYTE(3)
+       STORE_BYTE(4)
+       STORE_BYTE(5)
+       EX(sb, a1, 6(a0), .Lbyte_fixup\@)
+#endif
+0:
+#endif
 1:     jr              ra
        move            a2, zero
 
        .hidden __memset
        .endif
 
+.Lbyte_fixup\@:
+       PTR_SUBU        a2, $0, t0
+       jr              ra
+        PTR_ADDIU      a2, 1
+
 .Lfirst_fixup\@:
        jr      ra
        nop
index be777d9a3f85969a6b3104d392402ffe37d12c9d..272af8ac2425290c892849186b170f4b95c259b6 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/export.h>
 #include <linux/stringify.h>
 
-#ifndef CONFIG_CPU_MIPSR2
+#if !defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_MIPSR6)
 
 /*
  * For cli() we have to insert nops to make sure that the new value
index 9dfcd7fc1bc3dd712980c93f95ea4b8c1f3049d9..b30bf65c7d7d81ea1ed7e714d7a3554139855207 100644 (file)
@@ -48,6 +48,7 @@
 #include <asm/processor.h>
 #include <asm/fpu_emulator.h>
 #include <asm/fpu.h>
+#include <asm/mips-r2-to-r6-emul.h>
 
 #include "ieee754.h"
 
@@ -68,7 +69,7 @@ static int fpux_emu(struct pt_regs *,
 #define modeindex(v) ((v) & FPU_CSR_RM)
 
 /* convert condition code register number to csr bit */
-static const unsigned int fpucondbit[8] = {
+const unsigned int fpucondbit[8] = {
        FPU_CSR_COND0,
        FPU_CSR_COND1,
        FPU_CSR_COND2,
@@ -448,6 +449,9 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.next_pc_inc;
                        /* Fall through */
                case jr_op:
+                       /* For R6, JR already emulated in jalr_op */
+                       if (NO_R6EMU && insn.r_format.opcode == jr_op)
+                               break;
                        *contpc = regs->regs[insn.r_format.rs];
                        return 1;
                }
@@ -456,12 +460,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                switch (insn.i_format.rt) {
                case bltzal_op:
                case bltzall_op:
+                       if (NO_R6EMU && (insn.i_format.rs ||
+                           insn.i_format.rt == bltzall_op))
+                               break;
+
                        regs->regs[31] = regs->cp0_epc +
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                        /* Fall through */
-               case bltz_op:
                case bltzl_op:
+                       if (NO_R6EMU)
+                               break;
+               case bltz_op:
                        if ((long)regs->regs[insn.i_format.rs] < 0)
                                *contpc = regs->cp0_epc +
                                        dec_insn.pc_inc +
@@ -473,12 +483,18 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                        return 1;
                case bgezal_op:
                case bgezall_op:
+                       if (NO_R6EMU && (insn.i_format.rs ||
+                           insn.i_format.rt == bgezall_op))
+                               break;
+
                        regs->regs[31] = regs->cp0_epc +
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                        /* Fall through */
-               case bgez_op:
                case bgezl_op:
+                       if (NO_R6EMU)
+                               break;
+               case bgez_op:
                        if ((long)regs->regs[insn.i_format.rs] >= 0)
                                *contpc = regs->cp0_epc +
                                        dec_insn.pc_inc +
@@ -505,8 +521,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                /* Set microMIPS mode bit: XOR for jalx. */
                *contpc ^= bit;
                return 1;
-       case beq_op:
        case beql_op:
+               if (NO_R6EMU)
+                       break;
+       case beq_op:
                if (regs->regs[insn.i_format.rs] ==
                    regs->regs[insn.i_format.rt])
                        *contpc = regs->cp0_epc +
@@ -517,8 +535,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                return 1;
-       case bne_op:
        case bnel_op:
+               if (NO_R6EMU)
+                       break;
+       case bne_op:
                if (regs->regs[insn.i_format.rs] !=
                    regs->regs[insn.i_format.rt])
                        *contpc = regs->cp0_epc +
@@ -529,8 +549,34 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                return 1;
-       case blez_op:
        case blezl_op:
+               if (NO_R6EMU)
+                       break;
+       case blez_op:
+
+               /*
+                * Compact branches for R6 for the
+                * blez and blezl opcodes.
+                * BLEZ  | rs = 0 | rt != 0  == BLEZALC
+                * BLEZ  | rs = rt != 0      == BGEZALC
+                * BLEZ  | rs != 0 | rt != 0 == BGEUC
+                * BLEZL | rs = 0 | rt != 0  == BLEZC
+                * BLEZL | rs = rt != 0      == BGEZC
+                * BLEZL | rs != 0 | rt != 0 == BGEC
+                *
+                * For real BLEZ{,L}, rt is always 0.
+                */
+               if (cpu_has_mips_r6 && insn.i_format.rt) {
+                       if ((insn.i_format.opcode == blez_op) &&
+                           ((!insn.i_format.rs && insn.i_format.rt) ||
+                            (insn.i_format.rs == insn.i_format.rt)))
+                               regs->regs[31] = regs->cp0_epc +
+                                       dec_insn.pc_inc;
+                       *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+
+                       return 1;
+               }
                if ((long)regs->regs[insn.i_format.rs] <= 0)
                        *contpc = regs->cp0_epc +
                                dec_insn.pc_inc +
@@ -540,8 +586,35 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                return 1;
-       case bgtz_op:
        case bgtzl_op:
+               if (NO_R6EMU)
+                       break;
+       case bgtz_op:
+               /*
+                * Compact branches for R6 for the
+                * bgtz and bgtzl opcodes.
+                * BGTZ  | rs = 0 | rt != 0  == BGTZALC
+                * BGTZ  | rs = rt != 0      == BLTZALC
+                * BGTZ  | rs != 0 | rt != 0 == BLTUC
+                * BGTZL | rs = 0 | rt != 0  == BGTZC
+                * BGTZL | rs = rt != 0      == BLTZC
+                * BGTZL | rs != 0 | rt != 0 == BLTC
+                *
+                * The *ZALC variants of BGTZ require rt != 0.
+                * For real BGTZ{,L}, rt is always 0.
+                */
+               if (cpu_has_mips_r6 && insn.i_format.rt) {
+                       if ((insn.i_format.opcode == bgtz_op) &&
+                           ((!insn.i_format.rs && insn.i_format.rt) ||
+                            (insn.i_format.rs == insn.i_format.rt)))
+                               regs->regs[31] = regs->cp0_epc +
+                                       dec_insn.pc_inc;
+                       *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                               dec_insn.next_pc_inc;
+
+                       return 1;
+               }
+
                if ((long)regs->regs[insn.i_format.rs] > 0)
                        *contpc = regs->cp0_epc +
                                dec_insn.pc_inc +
@@ -551,6 +624,16 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                                dec_insn.pc_inc +
                                dec_insn.next_pc_inc;
                return 1;
+       case cbcond0_op:
+       case cbcond1_op:
+               if (!cpu_has_mips_r6)
+                       break;
+               if (insn.i_format.rt && !insn.i_format.rs)
+                       regs->regs[31] = regs->cp0_epc + 4;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+
+               return 1;
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
        case lwc2_op: /* This is bbit0 on Octeon */
                if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) == 0)
@@ -576,9 +659,73 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
                else
                        *contpc = regs->cp0_epc + 8;
                return 1;
+#else
+       case bc6_op:
+               /*
+                * Only valid for MIPS R6, but we can still end up
+                * here from a broken userland, so just tell the
+                * emulator this is not a branch and let it break later on.
+                */
+               if (!cpu_has_mips_r6)
+                       break;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+
+               return 1;
+       case balc6_op:
+               if (!cpu_has_mips_r6)
+                       break;
+               regs->regs[31] = regs->cp0_epc + 4;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+
+               return 1;
+       case beqzcjic_op:
+               if (!cpu_has_mips_r6)
+                       break;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+
+               return 1;
+       case bnezcjialc_op:
+               if (!cpu_has_mips_r6)
+                       break;
+               if (!insn.i_format.rs)
+                       regs->regs[31] = regs->cp0_epc + 4;
+               *contpc = regs->cp0_epc + dec_insn.pc_inc +
+                       dec_insn.next_pc_inc;
+
+               return 1;
 #endif
        case cop0_op:
        case cop1_op:
+               /* Need to check for R6 bc1nez and bc1eqz branches */
+               if (cpu_has_mips_r6 &&
+                   ((insn.i_format.rs == bc1eqz_op) ||
+                    (insn.i_format.rs == bc1nez_op))) {
+                       bit = 0;
+                       switch (insn.i_format.rs) {
+                       case bc1eqz_op:
+                               if (get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1)
+                                   bit = 1;
+                               break;
+                       case bc1nez_op:
+                               if (!(get_fpr32(&current->thread.fpu.fpr[insn.i_format.rt], 0) & 0x1))
+                                   bit = 1;
+                               break;
+                       }
+                       if (bit)
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       (insn.i_format.simmediate << 2);
+                       else
+                               *contpc = regs->cp0_epc +
+                                       dec_insn.pc_inc +
+                                       dec_insn.next_pc_inc;
+
+                       return 1;
+               }
+               /* R2/R6 compatible cop1 instruction. Fall through */
        case cop2_op:
        case cop1x_op:
                if (insn.i_format.rs == bc_op) {
@@ -1414,14 +1561,14 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                 * achieve full IEEE-754 accuracy - however this emulator does.
                 */
                case frsqrt_op:
-                       if (!cpu_has_mips_4_5_r2)
+                       if (!cpu_has_mips_4_5_r2_r6)
                                return SIGILL;
 
                        handler.u = fpemu_sp_rsqrt;
                        goto scopuop;
 
                case frecip_op:
-                       if (!cpu_has_mips_4_5_r2)
+                       if (!cpu_has_mips_4_5_r2_r6)
                                return SIGILL;
 
                        handler.u = fpemu_sp_recip;
@@ -1616,13 +1763,13 @@ copcsr:
                 * achieve full IEEE-754 accuracy - however this emulator does.
                 */
                case frsqrt_op:
-                       if (!cpu_has_mips_4_5_r2)
+                       if (!cpu_has_mips_4_5_r2_r6)
                                return SIGILL;
 
                        handler.u = fpemu_dp_rsqrt;
                        goto dcopuop;
                case frecip_op:
-                       if (!cpu_has_mips_4_5_r2)
+                       if (!cpu_has_mips_4_5_r2_r6)
                                return SIGILL;
 
                        handler.u = fpemu_dp_recip;
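
The branch-decoding hunks above hinge on one rule: on R6 the legacy BLEZ/BLEZL and BGTZ/BGTZL encodings with a non-zero rt field are reused for compact branches, which have no delay slot, so the emulator only needs to decide whether the link register ($31) is written and then continue at the next sequential instruction. A minimal standalone sketch of that decision, with the standard MIPS32 major-opcode values and illustrative helper names (not the kernel function itself):

    /*
     * Sketch of the R6 compact-branch link rule assumed above.  rt == 0
     * means the classic (delay-slot) branch; otherwise only the
     * non-likely opcodes have *ALC (linking) forms, selected by
     * rs == 0 or rs == rt.
     */
    #include <stdbool.h>

    #define BLEZ_OP 0x06            /* standard MIPS32 major opcodes */
    #define BGTZ_OP 0x07

    static bool r6_compact_branch_links(unsigned op, unsigned rs, unsigned rt)
    {
            if (!rt)
                    return false;   /* classic BLEZ/BGTZ/BLEZL/BGTZL */
            if (op != BLEZ_OP && op != BGTZ_OP)
                    return false;   /* *L opcodes map to non-linking BxxC forms */
            return rs == 0 || rs == rt;     /* BLEZALC/BGEZALC, BGTZALC/BLTZALC */
    }
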
index dd261df005c20c6ce7540fb458a2ec70317d6c99..3f8059602765ea9703841715e15c60a72ab1d123 100644 (file)
@@ -794,7 +794,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
                __asm__ __volatile__ (
                        ".set push\n\t"
                        ".set noat\n\t"
-                       ".set mips3\n\t"
+                       ".set "MIPS_ISA_LEVEL"\n\t"
 #ifdef CONFIG_32BIT
                        "la     $at,1f\n\t"
 #endif
@@ -1255,6 +1255,7 @@ static void probe_pcache(void)
        case CPU_P5600:
        case CPU_PROAPTIV:
        case CPU_M5150:
+       case CPU_QEMU_GENERIC:
                if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
                    (c->icache.waysize > PAGE_SIZE))
                        c->icache.flags |= MIPS_CACHE_ALIASES;
@@ -1472,7 +1473,8 @@ static void setup_scache(void)
 
        default:
                if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-                                   MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+                                   MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
+                                   MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)) {
 #ifdef CONFIG_MIPS_CPU_SCACHE
                        if (mips_sc_init ()) {
                                scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
index 70ab5d664332694e92305331f13ed15a35ab1956..7ff8637e530d7974d002594797d42044505a0467 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ptrace.h>
+#include <linux/ratelimit.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/smp.h>
@@ -28,6 +29,8 @@
 #include <asm/highmem.h>               /* For VMALLOC_END */
 #include <linux/kdebug.h>
 
+int show_unhandled_signals = 1;
+
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -44,6 +47,8 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
        int fault;
        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
+       static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
+
 #if 0
        printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
               current->comm, current->pid, field, address, write,
@@ -203,15 +208,21 @@ bad_area_nosemaphore:
        if (user_mode(regs)) {
                tsk->thread.cp0_badvaddr = address;
                tsk->thread.error_code = write;
-#if 0
-               printk("do_page_fault() #2: sending SIGSEGV to %s for "
-                      "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
-                      tsk->comm,
-                      write ? "write access to" : "read access from",
-                      field, address,
-                      field, (unsigned long) regs->cp0_epc,
-                      field, (unsigned long) regs->regs[31]);
-#endif
+               if (show_unhandled_signals &&
+                   unhandled_signal(tsk, SIGSEGV) &&
+                   __ratelimit(&ratelimit_state)) {
+                       pr_info("\ndo_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx",
+                               tsk->comm,
+                               write ? "write access to" : "read access from",
+                               field, address);
+                       pr_info("epc = %0*lx in", field,
+                               (unsigned long) regs->cp0_epc);
+                       print_vma_addr(" ", regs->cp0_epc);
+                       pr_info("ra  = %0*lx in", field,
+                               (unsigned long) regs->regs[31]);
+                       print_vma_addr(" ", regs->regs[31]);
+                       pr_info("\n");
+               }
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                /* info.si_code has been set above */
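
The fault-handler hunk above replaces the long-dead "#if 0" printk with a rate-limited report, gated on show_unhandled_signals and unhandled_signal(), so a crashing task is logged without letting a flood of faults spam the console. A hedged sketch of the same rate-limiting pattern in isolation (names are illustrative):

    /*
     * Sketch of the __ratelimit() pattern used above: at most 10 reports
     * per 5 * HZ window, anything beyond that is silently dropped.
     */
    #include <linux/kernel.h>
    #include <linux/printk.h>
    #include <linux/ratelimit.h>

    static DEFINE_RATELIMIT_STATE(segv_rs, 5 * HZ, 10);

    static void report_segv(const char *comm, unsigned long addr)
    {
            if (!__ratelimit(&segv_rs))
                    return;         /* over budget for this window */
            pr_info("sending SIGSEGV to %s for invalid access at %#lx\n",
                    comm, addr);
    }
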
index b611102e23b5c72948ef5c5b8c1e12d603a0ff15..3f85f921801b84f7f91cd67e42bbe528552d6860 100644 (file)
@@ -72,6 +72,20 @@ static struct uasm_reloc relocs[5];
 #define cpu_is_r4600_v1_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002010)
 #define cpu_is_r4600_v2_x()    ((read_c0_prid() & 0xfffffff0) == 0x00002020)
 
+/*
+ * R6 limits the pref instruction's offset to a signed 9-bit immediate.
+ * Skip the prefetch if the offset does not fit in that range.
+ */
+#define _uasm_i_pref(a, b, c, d)               \
+do {                                           \
+       if (cpu_has_mips_r6) {                  \
+               if (c <= 0xff && c >= -0x100)   \
+                       uasm_i_pref(a, b, c, d);\
+       } else {                                \
+               uasm_i_pref(a, b, c, d);        \
+       }                                       \
+} while (0)
+
 static int pref_bias_clear_store;
 static int pref_bias_copy_load;
 static int pref_bias_copy_store;
@@ -178,7 +192,15 @@ static void set_prefetch_parameters(void)
                        pref_bias_copy_load = 256;
                        pref_bias_copy_store = 128;
                        pref_src_mode = Pref_LoadStreamed;
-                       pref_dst_mode = Pref_PrepareForStore;
+                       if (cpu_has_mips_r6)
+                               /*
+                                * Bit 30 (Pref_PrepareForStore) has been
+                                * removed from MIPS R6. Use bit 5
+                                * (Pref_StoreStreamed).
+                                */
+                               pref_dst_mode = Pref_StoreStreamed;
+                       else
+                               pref_dst_mode = Pref_PrepareForStore;
                        break;
                }
        } else {
@@ -214,7 +236,7 @@ static inline void build_clear_pref(u32 **buf, int off)
                return;
 
        if (pref_bias_clear_store) {
-               uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
+               _uasm_i_pref(buf, pref_dst_mode, pref_bias_clear_store + off,
                            A0);
        } else if (cache_line_size == (half_clear_loop_size << 1)) {
                if (cpu_has_cache_cdex_s) {
@@ -357,7 +379,7 @@ static inline void build_copy_load_pref(u32 **buf, int off)
                return;
 
        if (pref_bias_copy_load)
-               uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
+               _uasm_i_pref(buf, pref_src_mode, pref_bias_copy_load + off, A1);
 }
 
 static inline void build_copy_store_pref(u32 **buf, int off)
@@ -366,7 +388,7 @@ static inline void build_copy_store_pref(u32 **buf, int off)
                return;
 
        if (pref_bias_copy_store) {
-               uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
+               _uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
                            A0);
        } else if (cache_line_size == (half_copy_loop_size << 1)) {
                if (cpu_has_cache_cdex_s) {
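
The _uasm_i_pref() wrapper above exists because R6 shrinks the pref offset to a signed 9-bit immediate; since a prefetch is only a performance hint, an out-of-range bias is simply dropped rather than split into extra instructions. The check reduces to:

    /*
     * Range check behind _uasm_i_pref(): a signed 9-bit immediate can
     * only encode offsets in [-256, 255].
     */
    #include <stdbool.h>

    static bool pref_offset_fits_r6(int off)
    {
            return off >= -0x100 && off <= 0xff;
    }
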
index 99eb8fabab606afe28781620301f430e4b776fd4..4ceafd13870cd6945634713f04ac3f7d65ba52d2 100644 (file)
@@ -81,6 +81,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
        case CPU_PROAPTIV:
        case CPU_P5600:
        case CPU_BMIPS5000:
+       case CPU_QEMU_GENERIC:
                if (config2 & (1 << 12))
                        return 0;
        }
@@ -104,7 +105,8 @@ static inline int __init mips_sc_probe(void)
 
        /* Ignore anything but MIPSxx processors */
        if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
-                             MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
+                             MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
+                             MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)))
                return 0;
 
        /* Does this MIPS32/MIPS64 CPU have a config2 register? */
index 30639a6e9b8ca3ad3677afb0cbd553b9c9472c18..b2afa49beab082116e282f8b043cb2a65e4c06ce 100644 (file)
@@ -485,13 +485,11 @@ static void r4k_tlb_configure(void)
                 * Enable the no read, no exec bits, and enable large virtual
                 * address.
                 */
-               u32 pg = PG_RIE | PG_XIE;
 #ifdef CONFIG_64BIT
-               pg |= PG_ELPA;
+               set_c0_pagegrain(PG_RIE | PG_XIE | PG_ELPA);
+#else
+               set_c0_pagegrain(PG_RIE | PG_XIE);
 #endif
-               if (cpu_has_rixiex)
-                       pg |= PG_IEC;
-               write_c0_pagegrain(pg);
        }
 
        temp_tlb_entry = current_cpu_data.tlbsize - 1;
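
Replacing write_c0_pagegrain() with set_c0_pagegrain() turns the update into a read-modify-write, so RIE/XIE/ELPA are ORed into PageGrain without clobbering bits configured elsewhere. A sketch of the usual set_c0_*() shape, assuming the macro-generated accessor in <asm/mipsregs.h> follows this pattern:

    /*
     * Hedged sketch of the set_c0_pagegrain() read-modify-write: only
     * the requested bits are added, and the previous value is returned.
     */
    static inline unsigned int sketch_set_c0_pagegrain(unsigned int set)
    {
            unsigned int old = read_c0_pagegrain();

            write_c0_pagegrain(old | set);
            return old;
    }
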
index 3978a3d813666f8566159ac20c705c2a5c88df53..d75ff73a20120bf28d919b7b6b5bf30a22b5088f 100644 (file)
@@ -501,7 +501,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
        case tlb_indexed: tlbw = uasm_i_tlbwi; break;
        }
 
-       if (cpu_has_mips_r2) {
+       if (cpu_has_mips_r2_exec_hazard) {
                /*
                 * The architecture spec says an ehb is required here,
                 * but a number of cores do not have the hazard and
@@ -514,6 +514,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
                case CPU_PROAPTIV:
                case CPU_P5600:
                case CPU_M5150:
+               case CPU_QEMU_GENERIC:
                        break;
 
                default:
@@ -1952,7 +1953,7 @@ static void build_r4000_tlb_load_handler(void)
 
                switch (current_cpu_type()) {
                default:
-                       if (cpu_has_mips_r2) {
+                       if (cpu_has_mips_r2_exec_hazard) {
                                uasm_i_ehb(&p);
 
                case CPU_CAVIUM_OCTEON:
@@ -2019,7 +2020,7 @@ static void build_r4000_tlb_load_handler(void)
 
                switch (current_cpu_type()) {
                default:
-                       if (cpu_has_mips_r2) {
+                       if (cpu_has_mips_r2_exec_hazard) {
                                uasm_i_ehb(&p);
 
                case CPU_CAVIUM_OCTEON:
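
The guard around the EHB changes from a bare R2 test to cpu_has_mips_r2_exec_hazard so that R6 cores, which inherit R2's execution-hazard rules, also get the barrier after a TLB write. The feature test itself is not part of this hunk; a plausible definition would be along these lines:

    /*
     * Hypothetical sketch only -- the real definition lives in
     * <asm/cpu-features.h>: R2 and R6 cores both need the explicit
     * execution-hazard barrier (ehb).
     */
    #ifndef cpu_has_mips_r2_exec_hazard
    #define cpu_has_mips_r2_exec_hazard     (cpu_has_mips_r2 | cpu_has_mips_r6)
    #endif
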
index 8399ddf03a0235c5ce9d5ee28ed5007c9986a8ab..d78178daea4bc2c1e069d2069db976b1ce693740 100644 (file)
         | (e) << RE_SH                                         \
         | (f) << FUNC_SH)
 
-/* Define these when we are not the ISA the kernel is being compiled with. */
-#ifndef CONFIG_CPU_MICROMIPS
-#define MM_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
-#define MM_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
-#define MM_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
-#define MM_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
-#endif
-
 #include "uasm.c"
 
 static struct insn insn_table_MM[] = {
index 8e02291cfc0c1c2d8edeb5733498e877bff09542..b4a8378935625b2e1d0f228f6961ebd8208544f1 100644 (file)
         | (e) << RE_SH                                         \
         | (f) << FUNC_SH)
 
-/* Define these when we are not the ISA the kernel is being compiled with. */
-#ifdef CONFIG_CPU_MICROMIPS
-#define CL_uasm_i_b(buf, off) ISAOPC(_beq)(buf, 0, 0, off)
-#define CL_uasm_i_beqz(buf, rs, off) ISAOPC(_beq)(buf, rs, 0, off)
-#define CL_uasm_i_beqzl(buf, rs, off) ISAOPC(_beql)(buf, rs, 0, off)
-#define CL_uasm_i_bnez(buf, rs, off) ISAOPC(_bne)(buf, rs, 0, off)
-#endif
+/* This macro sets the non-variable bits of an R6 instruction. */
+#define M6(a, b, c, d, e)                                      \
+       ((a) << OP_SH                                           \
+        | (b) << RS_SH                                         \
+        | (c) << RT_SH                                         \
+        | (d) << SIMM9_SH                                      \
+        | (e) << FUNC_SH)
 
 #include "uasm.c"
 
@@ -62,7 +62,11 @@ static struct insn insn_table[] = {
        { insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
        { insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
        { insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
+#ifndef CONFIG_CPU_MIPSR6
        { insn_cache,  M(cache_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+       { insn_cache,  M6(cache_op, 0, 0, 0, cache6_op),  RS | RT | SIMM9 },
+#endif
        { insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
        { insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
@@ -85,13 +89,22 @@ static struct insn insn_table[] = {
        { insn_jal,  M(jal_op, 0, 0, 0, 0, 0),  JIMM },
        { insn_jalr,  M(spec_op, 0, 0, 0, 0, jalr_op), RS | RD },
        { insn_j,  M(j_op, 0, 0, 0, 0, 0),  JIMM },
+#ifndef CONFIG_CPU_MIPSR6
        { insn_jr,  M(spec_op, 0, 0, 0, 0, jr_op),  RS },
+#else
+       { insn_jr,  M(spec_op, 0, 0, 0, 0, jalr_op),  RS },
+#endif
        { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
        { insn_lh,  M(lh_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#ifndef CONFIG_CPU_MIPSR6
        { insn_lld,  M(lld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+       { insn_lld,  M6(spec3_op, 0, 0, 0, lld6_op),  RS | RT | SIMM9 },
+       { insn_ll,  M6(spec3_op, 0, 0, 0, ll6_op),  RS | RT | SIMM9 },
+#endif
        { insn_lui,  M(lui_op, 0, 0, 0, 0, 0),  RT | SIMM },
        { insn_lw,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
@@ -104,11 +117,20 @@ static struct insn insn_table[] = {
        { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
        { insn_ori,  M(ori_op, 0, 0, 0, 0, 0),  RS | RT | UIMM },
        { insn_or,  M(spec_op, 0, 0, 0, 0, or_op),  RS | RT | RD },
+#ifndef CONFIG_CPU_MIPSR6
        { insn_pref,  M(pref_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+       { insn_pref,  M6(spec3_op, 0, 0, 0, pref6_op),  RS | RT | SIMM9 },
+#endif
        { insn_rfe,  M(cop0_op, cop_op, 0, 0, 0, rfe_op),  0 },
        { insn_rotr,  M(spec_op, 1, 0, 0, 0, srl_op),  RT | RD | RE },
+#ifndef CONFIG_CPU_MIPSR6
        { insn_scd,  M(scd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_sc,  M(sc_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+#else
+       { insn_scd,  M6(spec3_op, 0, 0, 0, scd6_op),  RS | RT | SIMM9 },
+       { insn_sc,  M6(spec3_op, 0, 0, 0, sc6_op),  RS | RT | SIMM9 },
+#endif
        { insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
        { insn_sllv,  M(spec_op, 0, 0, 0, 0, sllv_op),  RS | RT | RD },
@@ -198,6 +220,8 @@ static void build_insn(u32 **buf, enum opcode opc, ...)
                op |= build_set(va_arg(ap, u32));
        if (ip->fields & SCIMM)
                op |= build_scimm(va_arg(ap, u32));
+       if (ip->fields & SIMM9)
+               op |= build_scimm9(va_arg(ap, u32));
        va_end(ap);
 
        **buf = op;
index 4adf30284813a93aa11143ddbfb433831327e329..319051c34343ae9e0eb7c5d4adf82732e5598cd6 100644 (file)
@@ -24,7 +24,8 @@ enum fields {
        JIMM = 0x080,
        FUNC = 0x100,
        SET = 0x200,
-       SCIMM = 0x400
+       SCIMM = 0x400,
+       SIMM9 = 0x800,
 };
 
 #define OP_MASK                0x3f
@@ -41,6 +42,8 @@ enum fields {
 #define FUNC_SH                0
 #define SET_MASK       0x7
 #define SET_SH         0
+#define SIMM9_SH       7
+#define SIMM9_MASK     0x1ff
 
 enum opcode {
        insn_invalid,
@@ -116,6 +119,14 @@ static inline u32 build_scimm(u32 arg)
        return (arg & SCIMM_MASK) << SCIMM_SH;
 }
 
+static inline u32 build_scimm9(s32 arg)
+{
+       WARN((arg > 0xff || arg < -0x100),
+              KERN_WARNING "Micro-assembler field overflow\n");
+
+       return (arg & SIMM9_MASK) << SIMM9_SH;
+}
+
 static inline u32 build_func(u32 arg)
 {
        WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");
@@ -330,7 +341,7 @@ I_u3u1u2(_ldx)
 void ISAFUNC(uasm_i_pref)(u32 **buf, unsigned int a, signed int b,
                            unsigned int c)
 {
-       if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
+       if (CAVIUM_OCTEON_DCACHE_PREFETCH_WAR && a <= 24 && a != 5)
                /*
                 * As per erratum Core-14449, replace prefetches 0-4,
                 * 6-24 with 'pref 28'.
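
The new SIMM9 field ties the uasm changes together: R6 moves cache, pref, ll/lld and sc/scd into the SPECIAL3 major opcode, where the offset is a signed 9-bit value at bit 7 and a minor function code sits in the low 6 bits, which is exactly what M6() and build_scimm9() assemble. A standalone sketch of the packing, using the shift and mask values from the hunks above:

    /*
     * Sketch of the R6 SPECIAL3 word layout assembled by M6() and
     * build_scimm9(): op[31:26] rs[25:21] rt[20:16] simm9[15:7] func[5:0].
     */
    #include <stdint.h>

    static uint32_t pack_r6_spec3(unsigned op, unsigned rs, unsigned rt,
                                  int simm9, unsigned func)
    {
            return (op << 26) | (rs << 21) | (rt << 16) |
                   ((simm9 & 0x1ff) << 7) | (func & 0x3f);
    }
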
index ec1dd2491f962da8103e616b3c91b3758e0ddcc9..e1d69895fb1de44f5d8503027f86ebb50f40d5a6 100644 (file)
@@ -72,7 +72,7 @@ void read_persistent_clock(struct timespec *ts)
 int get_c0_perfcount_int(void)
 {
        if (gic_present)
-               return gic_get_c0_compare_int();
+               return gic_get_c0_perfcount_int();
        if (cp0_perfcount_irq >= 0)
                return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
        return -1;
index f2355e3e65a10c4259015c48d273236630102c2d..f97e169393bc3060eb778835cbb1b4813f31b140 100644 (file)
@@ -173,8 +173,8 @@ static int bcm1480_pcibios_write(struct pci_bus *bus, unsigned int devfn,
 }
 
 struct pci_ops bcm1480_pci_ops = {
-       .read = bcm1480_pcibios_read,
-       .write = bcm1480_pcibios_write,
+       .read   = bcm1480_pcibios_read,
+       .write  = bcm1480_pcibios_write,
 };
 
 static struct resource bcm1480_mem_resource = {
index bedb72bd3a27155fb4068cfb7b9ee69d589f0c22..a04af55d89f10a55593b2b16f53f20c7c16ee95d 100644 (file)
@@ -327,8 +327,8 @@ static int octeon_write_config(struct pci_bus *bus, unsigned int devfn,
 
 
 static struct pci_ops octeon_pci_ops = {
-       .read = octeon_read_config,
-       .write = octeon_write_config,
+       .read   = octeon_read_config,
+       .write  = octeon_write_config,
 };
 
 static struct resource octeon_pci_mem_resource = {
index eb4a17ba4a530a9a73702bfb6fd6db1890295bb4..1bb0b2bf8d6ea1e411266fd3d8de6a5afc50fba0 100644 (file)
@@ -1792,8 +1792,8 @@ static int octeon_dummy_write_config(struct pci_bus *bus, unsigned int devfn,
 }
 
 static struct pci_ops octeon_pcie0_ops = {
-       .read = octeon_pcie0_read_config,
-       .write = octeon_pcie0_write_config,
+       .read   = octeon_pcie0_read_config,
+       .write  = octeon_pcie0_write_config,
 };
 
 static struct resource octeon_pcie0_mem_resource = {
@@ -1813,8 +1813,8 @@ static struct pci_controller octeon_pcie0_controller = {
 };
 
 static struct pci_ops octeon_pcie1_ops = {
-       .read = octeon_pcie1_read_config,
-       .write = octeon_pcie1_write_config,
+       .read   = octeon_pcie1_read_config,
+       .write  = octeon_pcie1_write_config,
 };
 
 static struct resource octeon_pcie1_mem_resource = {
@@ -1834,8 +1834,8 @@ static struct pci_controller octeon_pcie1_controller = {
 };
 
 static struct pci_ops octeon_dummy_ops = {
-       .read = octeon_dummy_read_config,
-       .write = octeon_dummy_write_config,
+       .read   = octeon_dummy_read_config,
+       .write  = octeon_dummy_write_config,
 };
 
 static struct resource octeon_dummy_mem_resource = {
index 6073ca456d110cace96281adf4337f5104c5d22c..4190093d30533d7d8046c9ad5342a28426c70a75 100644 (file)
@@ -36,14 +36,14 @@ config PMC_MSP7120_FPGA
 endchoice
 
 config MSP_HAS_USB
-       boolean
+       bool
        depends on PMC_MSP
 
 config MSP_ETH
-       boolean
+       bool
        select MSP_HAS_MAC
        depends on PMC_MSP
 
 config MSP_HAS_MAC
-       boolean
+       bool
        depends on PMC_MSP
index 8f1b86d4da84a8f4a4f894e50f61a76ef009554f..cdf1876000101e25a153f04415e622820418ba13 100644 (file)
@@ -152,28 +152,6 @@ static int gio_device_remove(struct device *dev)
        return 0;
 }
 
-static int gio_device_suspend(struct device *dev, pm_message_t state)
-{
-       struct gio_device *gio_dev = to_gio_device(dev);
-       struct gio_driver *drv = to_gio_driver(dev->driver);
-       int error = 0;
-
-       if (dev->driver && drv->suspend)
-               error = drv->suspend(gio_dev, state);
-       return error;
-}
-
-static int gio_device_resume(struct device *dev)
-{
-       struct gio_device *gio_dev = to_gio_device(dev);
-       struct gio_driver *drv = to_gio_driver(dev->driver);
-       int error = 0;
-
-       if (dev->driver && drv->resume)
-               error = drv->resume(gio_dev);
-       return error;
-}
-
 static void gio_device_shutdown(struct device *dev)
 {
        struct gio_device *gio_dev = to_gio_device(dev);
@@ -400,8 +378,6 @@ static struct bus_type gio_bus_type = {
        .match     = gio_bus_match,
        .probe     = gio_device_probe,
        .remove    = gio_device_remove,
-       .suspend   = gio_device_suspend,
-       .resume    = gio_device_resume,
        .shutdown  = gio_device_shutdown,
        .uevent    = gio_device_uevent,
 };
index ac37e54b3d5e7454fc2c78daf88008655d6b8870..e44a15d4f57364be376af2ee4c9c625d13e53371 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright (C) 1997, 1998, 1999, 2000, 06 by Ralf Baechle
  * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  */
+#include <linux/compiler.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/timer.h>
@@ -25,9 +26,9 @@
 #include <asm/sn/gda.h>
 #include <asm/sn/sn0/hub.h>
 
-void machine_restart(char *command) __attribute__((noreturn));
-void machine_halt(void) __attribute__((noreturn));
-void machine_power_off(void) __attribute__((noreturn));
+void machine_restart(char *command) __noreturn;
+void machine_halt(void) __noreturn;
+void machine_power_off(void) __noreturn;
 
 #define noreturn while(1);                             /* Silence gcc.  */
 
index 1f823da4c77bca73980094d4b27a043d24cffaa8..44b3470a0bbb71babcc9af5131d566d22aa317c5 100644 (file)
@@ -8,6 +8,7 @@
  * Copyright (C) 2003 Guido Guenther <agx@sigxcpu.org>
  */
 
+#include <linux/compiler.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -35,9 +36,9 @@
 static struct timer_list power_timer, blink_timer, debounce_timer;
 static int has_panicked, shuting_down;
 
-static void ip32_machine_restart(char *command) __attribute__((noreturn));
-static void ip32_machine_halt(void) __attribute__((noreturn));
-static void ip32_machine_power_off(void) __attribute__((noreturn));
+static void ip32_machine_restart(char *command) __noreturn;
+static void ip32_machine_halt(void) __noreturn;
+static void ip32_machine_power_off(void) __noreturn;
 
 static void ip32_machine_restart(char *cmd)
 {
index 91fbb6ee702cb7927e5879576a7926746f4000f8..965a0999fc4c081228a34a61daec7f1ef032a107 100644 (file)
@@ -148,7 +148,7 @@ endef
 # we require gcc 3.3 or above to compile the kernel
 archprepare: checkbin
 checkbin:
-       @if test "$(call cc-version)" -lt "0303"; then \
+       @if test "$(cc-version)" -lt "0303"; then \
                echo -n "Sorry, GCC v3.3 or above is required to build " ; \
                echo "the kernel." ; \
                false ; \
index 132d9c681d6ae275ca6129a20fee4b9e55487840..fc502e042438c237c3c32cfd3fbfd13eb1d057c9 100644 (file)
@@ -314,7 +314,7 @@ TOUT        := .tmp_gas_check
 # - Require gcc 4.0 or above on 64-bit
 # - gcc-4.2.0 has issues compiling modules on 64-bit
 checkbin:
-       @if test "$(call cc-version)" = "0304" ; then \
+       @if test "$(cc-version)" = "0304" ; then \
                if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
                        echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
                        echo 'correctly with gcc-3.4 and your version of binutils.'; \
@@ -322,13 +322,13 @@ checkbin:
                        false; \
                fi ; \
        fi
-       @if test "$(call cc-version)" -lt "0400" \
+       @if test "$(cc-version)" -lt "0400" \
            && test "x${CONFIG_PPC64}" = "xy" ; then \
                 echo -n "Sorry, GCC v4.0 or above is required to build " ; \
                 echo "the 64-bit powerpc kernel." ; \
                 false ; \
         fi
-       @if test "$(call cc-fullversion)" = "040200" \
+       @if test "$(cc-fullversion)" = "040200" \
            && test "x${CONFIG_MODULES}${CONFIG_PPC64}" = "xyy" ; then \
                echo -n '*** GCC-4.2.0 cannot compile the 64-bit powerpc ' ; \
                echo 'kernel with modules enabled.' ; \
index 51866f17068457038503ff14520ef9a67e3a0d7d..ca7957b09a3cc13ad6882006468a7e3431e7a75f 100644 (file)
@@ -142,6 +142,7 @@ CONFIG_VIRT_DRIVERS=y
 CONFIG_FSL_HV_MANAGER=y
 CONFIG_STAGING=y
 CONFIG_FSL_CORENET_CF=y
+CONFIG_CLK_QORIQ=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
index d6c0c819895288c39bd826085e7fadb6793b360a..04737aaa8b6b8ead5bfc2574fabe48afd78c872c 100644 (file)
@@ -122,6 +122,7 @@ CONFIG_DMADEVICES=y
 CONFIG_FSL_DMA=y
 CONFIG_VIRT_DRIVERS=y
 CONFIG_FSL_HV_MANAGER=y
+CONFIG_CLK_QORIQ=y
 CONFIG_FSL_CORENET_CF=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT3_FS=y
index 7316dd15278a35f60b710d0a348bc0d9328830fb..2d7b33fab953283cf391679b1e316e81f53852f9 100644 (file)
@@ -54,6 +54,7 @@
 #include <linux/irq.h>
 #include <linux/delay.h>
 #include <linux/irq_work.h>
+#include <linux/clk-provider.h>
 #include <asm/trace.h>
 
 #include <asm/io.h>
@@ -975,6 +976,10 @@ void __init time_init(void)
 
        init_decrementer_clockevent();
        tick_setup_hrtimer_broadcast();
+
+#ifdef CONFIG_COMMON_CLK
+       of_clk_init(NULL);
+#endif
 }
 
 
index 6eb614a271fbe07390271aca65d802e37d3a6481..f691bcabd71013e23a09a07bfec61a592e1cd237 100644 (file)
@@ -1168,6 +1168,11 @@ static void mpc5121_clk_provide_backwards_compat(void)
        }
 }
 
+/*
+ * The "fixed-clock" nodes (which include the oscillator node if the board's
+ * DT provides one) have already been scanned by of_clk_init() in
+ * time_init().
+ */
 int __init mpc5121_clk_init(void)
 {
        struct device_node *clk_np;
@@ -1186,12 +1191,6 @@ int __init mpc5121_clk_init(void)
        /* invalidate all not yet registered clock slots */
        mpc512x_clk_preset_data();
 
-       /*
-        * have the device tree scanned for "fixed-clock" nodes (which
-        * includes the oscillator node if the board's DT provides one)
-        */
-       of_clk_init(NULL);
-
        /*
         * add a dummy clock for those situations where a clock spec is
         * required yet no real clock is involved
index 4c8008dd938e8d0eb5c4fdc18b35b2cfc90f6807..99824ff8dd354e74ff421a2c9bb59243e045d541 100644 (file)
@@ -74,7 +74,7 @@ static void hypfs_remove(struct dentry *dentry)
        parent = dentry->d_parent;
        mutex_lock(&parent->d_inode->i_mutex);
        if (hypfs_positive(dentry)) {
-               if (S_ISDIR(dentry->d_inode->i_mode))
+               if (d_is_dir(dentry))
                        simple_rmdir(parent->d_inode, dentry);
                else
                        simple_unlink(parent->d_inode, dentry);
@@ -144,36 +144,32 @@ static int hypfs_open(struct inode *inode, struct file *filp)
        return nonseekable_open(inode, filp);
 }
 
-static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                             unsigned long nr_segs, loff_t offset)
+static ssize_t hypfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-       char *data;
-       ssize_t ret;
-       struct file *filp = iocb->ki_filp;
-       /* XXX: temporary */
-       char __user *buf = iov[0].iov_base;
-       size_t count = iov[0].iov_len;
-
-       if (nr_segs != 1)
-               return -EINVAL;
-
-       data = filp->private_data;
-       ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data));
-       if (ret <= 0)
-               return ret;
+       struct file *file = iocb->ki_filp;
+       char *data = file->private_data;
+       size_t available = strlen(data);
+       loff_t pos = iocb->ki_pos;
+       size_t count;
 
-       iocb->ki_pos += ret;
-       file_accessed(filp);
-
-       return ret;
+       if (pos < 0)
+               return -EINVAL;
+       if (pos >= available || !iov_iter_count(to))
+               return 0;
+       count = copy_to_iter(data + pos, available - pos, to);
+       if (!count)
+               return -EFAULT;
+       iocb->ki_pos = pos + count;
+       file_accessed(file);
+       return count;
 }
-static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                             unsigned long nr_segs, loff_t offset)
+
+static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        int rc;
        struct super_block *sb = file_inode(iocb->ki_filp)->i_sb;
        struct hypfs_sb_info *fs_info = sb->s_fs_info;
-       size_t count = iov_length(iov, nr_segs);
+       size_t count = iov_iter_count(from);
 
        /*
         * Currently we only allow one update per second for two reasons:
@@ -202,6 +198,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
        }
        hypfs_update_update(sb);
        rc = count;
+       iov_iter_advance(from, count);
 out:
        mutex_unlock(&fs_info->lock);
        return rc;
@@ -440,10 +437,10 @@ struct dentry *hypfs_create_str(struct dentry *dir,
 static const struct file_operations hypfs_file_ops = {
        .open           = hypfs_open,
        .release        = hypfs_release,
-       .read           = do_sync_read,
-       .write          = do_sync_write,
-       .aio_read       = hypfs_aio_read,
-       .aio_write      = hypfs_aio_write,
+       .read           = new_sync_read,
+       .write          = new_sync_write,
+       .read_iter      = hypfs_read_iter,
+       .write_iter     = hypfs_write_iter,
        .llseek         = no_llseek,
 };
 
index c4fbb9527c5ca2b553ca98c3f7887ea236fdff1c..b1453a2ae1ca583b2d4a0dc99bc325fd30ddf10d 100644 (file)
@@ -18,15 +18,15 @@ struct cpu_topology_s390 {
        cpumask_t book_mask;
 };
 
-extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
 
-#define topology_physical_package_id(cpu)      (cpu_topology[cpu].socket_id)
-#define topology_thread_id(cpu)                        (cpu_topology[cpu].thread_id)
-#define topology_thread_cpumask(cpu)           (&cpu_topology[cpu].thread_mask)
-#define topology_core_id(cpu)                  (cpu_topology[cpu].core_id)
-#define topology_core_cpumask(cpu)             (&cpu_topology[cpu].core_mask)
-#define topology_book_id(cpu)                  (cpu_topology[cpu].book_id)
-#define topology_book_cpumask(cpu)             (&cpu_topology[cpu].book_mask)
+#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
+#define topology_thread_id(cpu)                  (per_cpu(cpu_topology, cpu).thread_id)
+#define topology_thread_cpumask(cpu)     (&per_cpu(cpu_topology, cpu).thread_mask)
+#define topology_core_id(cpu)            (per_cpu(cpu_topology, cpu).core_id)
+#define topology_core_cpumask(cpu)       (&per_cpu(cpu_topology, cpu).core_mask)
+#define topology_book_id(cpu)            (per_cpu(cpu_topology, cpu).book_id)
+#define topology_book_cpumask(cpu)       (&per_cpu(cpu_topology, cpu).book_mask)
 
 #define mc_capable() 1
 
@@ -51,14 +51,6 @@ static inline void topology_expect_change(void) { }
 #define POLARIZATION_VM                (2)
 #define POLARIZATION_VH                (3)
 
-#ifdef CONFIG_SCHED_BOOK
-void s390_init_cpu_topology(void);
-#else
-static inline void s390_init_cpu_topology(void)
-{
-};
-#endif
-
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_S390_TOPOLOGY_H */
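
With the NR_CPUS array gone, every topology lookup goes through per_cpu(), so storage exists only for possible CPUs and the topology_*() macros keep working unchanged. A small usage sketch (the accessor name is illustrative):

    /*
     * Usage sketch for the per-CPU cpu_topology conversion above.
     */
    #include <linux/percpu.h>

    DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);

    static int socket_of(int cpu)
    {
            return per_cpu(cpu_topology, cpu).socket_id;
    }
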
index 632fa06ea162c567923044ddf4e3ced0339d24bd..0969d113b3d68f290a95b3b7e1484bf5c6b8caa3 100644 (file)
@@ -91,12 +91,9 @@ static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
 {
        if (level >= CACHE_MAX_LEVEL)
                return CACHE_TYPE_NOCACHE;
-
        ci += level;
-
        if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
                return CACHE_TYPE_NOCACHE;
-
        return cache_type_map[ci->type];
 }
 
@@ -111,23 +108,19 @@ static inline unsigned long ecag(int ai, int li, int ti)
 }
 
 static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
-                        enum cache_type type, unsigned int level)
+                        enum cache_type type, unsigned int level, int cpu)
 {
        int ti, num_sets;
-       int cpu = smp_processor_id();
 
        if (type == CACHE_TYPE_INST)
                ti = CACHE_TI_INSTRUCTION;
        else
                ti = CACHE_TI_UNIFIED;
-
        this_leaf->level = level + 1;
        this_leaf->type = type;
        this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
-       this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
-                                               level, ti);
+       this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
        this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
-
        num_sets = this_leaf->size / this_leaf->coherency_line_size;
        num_sets /= this_leaf->ways_of_associativity;
        this_leaf->number_of_sets = num_sets;
@@ -145,7 +138,6 @@ int init_cache_level(unsigned int cpu)
 
        if (!this_cpu_ci)
                return -EINVAL;
-
        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
        do {
                ctype = get_cache_type(&ct.ci[0], level);
@@ -154,34 +146,31 @@ int init_cache_level(unsigned int cpu)
                /* Separate instruction and data caches */
                leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
        } while (++level < CACHE_MAX_LEVEL);
-
        this_cpu_ci->num_levels = level;
        this_cpu_ci->num_leaves = leaves;
-
        return 0;
 }
 
 int populate_cache_leaves(unsigned int cpu)
 {
+       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+       struct cacheinfo *this_leaf = this_cpu_ci->info_list;
        unsigned int level, idx, pvt;
        union cache_topology ct;
        enum cache_type ctype;
-       struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
-       struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 
        ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
        for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
             idx < this_cpu_ci->num_leaves; idx++, level++) {
                if (!this_leaf)
                        return -EINVAL;
-
                pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
                ctype = get_cache_type(&ct.ci[0], level);
                if (ctype == CACHE_TYPE_SEPARATE) {
-                       ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
-                       ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
+                       ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
+                       ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
                } else {
-                       ci_leaf_init(this_leaf++, pvt, ctype, level);
+                       ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
                }
        }
        return 0;
index 70a3294509018e33c49d6b0a383c841d79d2f3af..4427ab7ac23af3e0450e4875b36b83ead687c36d 100644 (file)
@@ -393,17 +393,19 @@ static __init void detect_machine_facilities(void)
                S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
        if (test_facility(129))
                S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
-       if (test_facility(128))
-               S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
 #endif
 }
 
-static int __init nocad_setup(char *str)
+static int __init cad_setup(char *str)
 {
-       S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD;
+       int val;
+
+       get_option(&str, &val);
+       if (val && test_facility(128))
+               S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
        return 0;
 }
-early_param("nocad", nocad_setup);
+early_param("cad", cad_setup);
 
 static int __init cad_init(void)
 {
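
The "nocad" switch becomes an opt-in "cad=" parameter: get_option() parses the integer after the '=' and MACHINE_FLAG_CAD is set only when both the option and facility 128 are present. The same early_param()/get_option() pattern in isolation, with a made-up parameter name:

    /*
     * Illustrative early_param handler following the cad_setup() pattern
     * above; "foo" is a hypothetical parameter name.
     */
    #include <linux/init.h>
    #include <linux/kernel.h>

    static int foo_enabled;

    static int __init foo_setup(char *str)
    {
            int val;

            if (get_option(&str, &val))
                    foo_enabled = !!val;
            return 0;
    }
    early_param("foo", foo_setup);
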
index bfac77ada4f28137b5545d9268d37346eb3af0ff..a5ea8bc17cb3bdd24f085260ca5d03364d509bec 100644 (file)
@@ -909,7 +909,6 @@ void __init setup_arch(char **cmdline_p)
        setup_lowcore();
        smp_fill_possible_mask();
         cpu_init();
-       s390_init_cpu_topology();
 
        /*
         * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
index a668993ff577f95d40aff6f474e082374967a79b..db8f1115a3bf5530b82eb53d00931cbe94d6522b 100644 (file)
@@ -59,14 +59,13 @@ enum {
        CPU_STATE_CONFIGURED,
 };
 
+static DEFINE_PER_CPU(struct cpu *, cpu_device);
+
 struct pcpu {
-       struct cpu *cpu;
        struct _lowcore *lowcore;       /* lowcore page(s) for the cpu */
-       unsigned long async_stack;      /* async stack for the cpu */
-       unsigned long panic_stack;      /* panic stack for the cpu */
        unsigned long ec_mask;          /* bit mask for ec_xxx functions */
-       int state;                      /* physical cpu state */
-       int polarization;               /* physical polarization */
+       signed char state;              /* physical cpu state */
+       signed char polarization;       /* physical polarization */
        u16 address;                    /* physical cpu address */
 };
 
@@ -173,25 +172,30 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
        pcpu_sigp_retry(pcpu, order, 0);
 }
 
+#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+
 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 {
+       unsigned long async_stack, panic_stack;
        struct _lowcore *lc;
 
        if (pcpu != &pcpu_devices[0]) {
                pcpu->lowcore = (struct _lowcore *)
                        __get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
-               pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-               pcpu->panic_stack = __get_free_page(GFP_KERNEL);
-               if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
+               async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+               panic_stack = __get_free_page(GFP_KERNEL);
+               if (!pcpu->lowcore || !panic_stack || !async_stack)
                        goto out;
+       } else {
+               async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
+               panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
        }
        lc = pcpu->lowcore;
        memcpy(lc, &S390_lowcore, 512);
        memset((char *) lc + 512, 0, sizeof(*lc) - 512);
-       lc->async_stack = pcpu->async_stack + ASYNC_SIZE
-               - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
-       lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
-               - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
+       lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
+       lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
        lc->cpu_nr = cpu;
        lc->spinlock_lockval = arch_spin_lockval(cpu);
 #ifndef CONFIG_64BIT
@@ -212,8 +216,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
        return 0;
 out:
        if (pcpu != &pcpu_devices[0]) {
-               free_page(pcpu->panic_stack);
-               free_pages(pcpu->async_stack, ASYNC_ORDER);
+               free_page(panic_stack);
+               free_pages(async_stack, ASYNC_ORDER);
                free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
        }
        return -ENOMEM;
@@ -235,11 +239,11 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
 #else
        vdso_free_per_cpu(pcpu->lowcore);
 #endif
-       if (pcpu != &pcpu_devices[0]) {
-               free_page(pcpu->panic_stack);
-               free_pages(pcpu->async_stack, ASYNC_ORDER);
-               free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
-       }
+       if (pcpu == &pcpu_devices[0])
+               return;
+       free_page(pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET);
+       free_pages(pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET, ASYNC_ORDER);
+       free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
@@ -366,7 +370,8 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
 void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
        pcpu_delegate(&pcpu_devices[0], func, data,
-                     pcpu_devices->panic_stack + PAGE_SIZE);
+                     pcpu_devices->lowcore->panic_stack -
+                     PANIC_FRAME_OFFSET + PAGE_SIZE);
 }
 
 int smp_find_processor_id(u16 address)
@@ -935,10 +940,6 @@ void __init smp_prepare_boot_cpu(void)
        pcpu->state = CPU_STATE_CONFIGURED;
        pcpu->address = stap();
        pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
-       pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
-               + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-       pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
-               + STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
        S390_lowcore.percpu_offset = __per_cpu_offset[0];
        smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
        set_cpu_present(0, true);
@@ -1078,8 +1079,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
                          void *hcpu)
 {
        unsigned int cpu = (unsigned int)(long)hcpu;
-       struct cpu *c = pcpu_devices[cpu].cpu;
-       struct device *s = &c->dev;
+       struct device *s = &per_cpu(cpu_device, cpu)->dev;
        int err = 0;
 
        switch (action & ~CPU_TASKS_FROZEN) {
@@ -1102,7 +1102,7 @@ static int smp_add_present_cpu(int cpu)
        c = kzalloc(sizeof(*c), GFP_KERNEL);
        if (!c)
                return -ENOMEM;
-       pcpu_devices[cpu].cpu = c;
+       per_cpu(cpu_device, cpu) = c;
        s = &c->dev;
        c->hotpluggable = 1;
        rc = register_cpu(c, cpu);
index 24ee33f1af24228e04686700523819fcb77afdf2..14da43b801d93c2041f2d4964028fc180301eeff 100644 (file)
@@ -7,14 +7,14 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/workqueue.h>
-#include <linux/bootmem.h>
 #include <linux/cpuset.h>
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
@@ -42,8 +42,8 @@ static DEFINE_SPINLOCK(topology_lock);
 static struct mask_info socket_info;
 static struct mask_info book_info;
 
-struct cpu_topology_s390 cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
+DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -90,15 +90,15 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
                if (lcpu < 0)
                        continue;
                for (i = 0; i <= smp_cpu_mtid; i++) {
-                       cpu_topology[lcpu + i].book_id = book->id;
-                       cpu_topology[lcpu + i].core_id = rcore;
-                       cpu_topology[lcpu + i].thread_id = lcpu + i;
+                       per_cpu(cpu_topology, lcpu + i).book_id = book->id;
+                       per_cpu(cpu_topology, lcpu + i).core_id = rcore;
+                       per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i;
                        cpumask_set_cpu(lcpu + i, &book->mask);
                        cpumask_set_cpu(lcpu + i, &socket->mask);
                        if (one_socket_per_cpu)
-                               cpu_topology[lcpu + i].socket_id = rcore;
+                               per_cpu(cpu_topology, lcpu + i).socket_id = rcore;
                        else
-                               cpu_topology[lcpu + i].socket_id = socket->id;
+                               per_cpu(cpu_topology, lcpu + i).socket_id = socket->id;
                        smp_cpu_set_polarization(lcpu + i, tl_core->pp);
                }
                if (one_socket_per_cpu)
@@ -249,14 +249,14 @@ static void update_cpu_masks(void)
 
        spin_lock_irqsave(&topology_lock, flags);
        for_each_possible_cpu(cpu) {
-               cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
-               cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
-               cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
+               per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu);
+               per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu);
+               per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu);
                if (!MACHINE_HAS_TOPOLOGY) {
-                       cpu_topology[cpu].thread_id = cpu;
-                       cpu_topology[cpu].core_id = cpu;
-                       cpu_topology[cpu].socket_id = cpu;
-                       cpu_topology[cpu].book_id = cpu;
+                       per_cpu(cpu_topology, cpu).thread_id = cpu;
+                       per_cpu(cpu_topology, cpu).core_id = cpu;
+                       per_cpu(cpu_topology, cpu).socket_id = cpu;
+                       per_cpu(cpu_topology, cpu).book_id = cpu;
                }
        }
        spin_unlock_irqrestore(&topology_lock, flags);
@@ -334,50 +334,6 @@ void topology_expect_change(void)
        set_topology_timer();
 }
 
-static int __init early_parse_topology(char *p)
-{
-       if (strncmp(p, "off", 3))
-               return 0;
-       topology_enabled = 0;
-       return 0;
-}
-early_param("topology", early_parse_topology);
-
-static void __init alloc_masks(struct sysinfo_15_1_x *info,
-                              struct mask_info *mask, int offset)
-{
-       int i, nr_masks;
-
-       nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
-       for (i = 0; i < info->mnest - offset; i++)
-               nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
-       nr_masks = max(nr_masks, 1);
-       for (i = 0; i < nr_masks; i++) {
-               mask->next = alloc_bootmem_align(
-                       roundup_pow_of_two(sizeof(struct mask_info)),
-                       roundup_pow_of_two(sizeof(struct mask_info)));
-               mask = mask->next;
-       }
-}
-
-void __init s390_init_cpu_topology(void)
-{
-       struct sysinfo_15_1_x *info;
-       int i;
-
-       if (!MACHINE_HAS_TOPOLOGY)
-               return;
-       tl_info = alloc_bootmem_pages(PAGE_SIZE);
-       info = tl_info;
-       store_topology(info);
-       pr_info("The CPU configuration topology of the machine is:");
-       for (i = 0; i < TOPOLOGY_NR_MAG; i++)
-               printk(KERN_CONT " %d", info->mag[i]);
-       printk(KERN_CONT " / %d\n", info->mnest);
-       alloc_masks(info, &socket_info, 1);
-       alloc_masks(info, &book_info, 2);
-}
-
 static int cpu_management;
 
 static ssize_t dispatching_show(struct device *dev,
@@ -467,20 +423,29 @@ int topology_cpu_init(struct cpu *cpu)
 
 const struct cpumask *cpu_thread_mask(int cpu)
 {
-       return &cpu_topology[cpu].thread_mask;
+       return &per_cpu(cpu_topology, cpu).thread_mask;
 }
 
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-       return &cpu_topology[cpu].core_mask;
+       return &per_cpu(cpu_topology, cpu).core_mask;
 }
 
 static const struct cpumask *cpu_book_mask(int cpu)
 {
-       return &cpu_topology[cpu].book_mask;
+       return &per_cpu(cpu_topology, cpu).book_mask;
 }
 
+static int __init early_parse_topology(char *p)
+{
+       if (strncmp(p, "off", 3))
+               return 0;
+       topology_enabled = 0;
+       return 0;
+}
+early_param("topology", early_parse_topology);
+
 static struct sched_domain_topology_level s390_topology[] = {
        { cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
@@ -489,6 +454,42 @@ static struct sched_domain_topology_level s390_topology[] = {
        { NULL, },
 };
 
+static void __init alloc_masks(struct sysinfo_15_1_x *info,
+                              struct mask_info *mask, int offset)
+{
+       int i, nr_masks;
+
+       nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
+       for (i = 0; i < info->mnest - offset; i++)
+               nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
+       nr_masks = max(nr_masks, 1);
+       for (i = 0; i < nr_masks; i++) {
+               mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
+               mask = mask->next;
+       }
+}
+
+static int __init s390_topology_init(void)
+{
+       struct sysinfo_15_1_x *info;
+       int i;
+
+       if (!MACHINE_HAS_TOPOLOGY)
+               return 0;
+       tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
+       info = tl_info;
+       store_topology(info);
+       pr_info("The CPU configuration topology of the machine is:");
+       for (i = 0; i < TOPOLOGY_NR_MAG; i++)
+               printk(KERN_CONT " %d", info->mag[i]);
+       printk(KERN_CONT " / %d\n", info->mnest);
+       alloc_masks(info, &socket_info, 1);
+       alloc_masks(info, &book_info, 2);
+       set_sched_topology(s390_topology);
+       return 0;
+}
+early_initcall(s390_topology_init);
+
 static int __init topology_init(void)
 {
        if (MACHINE_HAS_TOPOLOGY)
@@ -498,10 +499,3 @@ static int __init topology_init(void)
        return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
 }
 device_initcall(topology_init);
-
-static int __init early_topology_init(void)
-{
-       set_sched_topology(s390_topology);
-       return 0;
-}
-early_initcall(early_topology_init);
index 7699e735ae28ed726f4725ee97619119e1aa5e1e..61541fb93dc63e0f4673dcfe12e9d69681eb0993 100644 (file)
@@ -25,9 +25,7 @@ __kernel_clock_gettime:
        je      4f
        cghi    %r2,__CLOCK_REALTIME
        je      5f
-       cghi    %r2,__CLOCK_THREAD_CPUTIME_ID
-       je      9f
-       cghi    %r2,-2          /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
+       cghi    %r2,-3          /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
        je      9f
        cghi    %r2,__CLOCK_MONOTONIC_COARSE
        je      3f
@@ -106,7 +104,7 @@ __kernel_clock_gettime:
        aghi    %r15,16
        br      %r14
 
-       /* CLOCK_THREAD_CPUTIME_ID for this thread */
+       /* CPUCLOCK_VIRT for this thread */
 9:     icm     %r0,15,__VDSO_ECTG_OK(%r5)
        jz      12f
        ear     %r2,%a4
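
The magic -3 is the dynamic clockid for "virtual CPU time of the calling thread": per-thread CPU clocks encode ~pid in the upper bits plus a per-thread flag and the clock type, and pid 0 means the caller. A standalone sketch of that encoding (constants assumed to mirror the posix CPU-clock definitions; they are not part of this hunk):

    /*
     * Sketch of the dynamic clockid the vdso fast path now matches:
     * for pid 0 (the calling thread) and CPUCLOCK_VIRT this yields -3.
     */
    #include <stdio.h>

    #define CPUCLOCK_VIRT            1     /* assumed: user time only */
    #define CPUCLOCK_PERTHREAD_MASK  4     /* assumed: per-thread clock flag */

    static int make_thread_cpuclock(unsigned int pid, unsigned int clock)
    {
            return (int)((~pid << 3) | clock | CPUCLOCK_PERTHREAD_MASK);
    }

    int main(void)
    {
            printf("%d\n", make_thread_cpuclock(0, CPUCLOCK_VIRT));  /* -3 */
            return 0;
    }
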
index d008f638b2cd27f7615c11a02bb5d2a8f3138d04..179a2c20b01f143a51a897d9b85e33198bf881a7 100644 (file)
@@ -183,7 +183,10 @@ unsigned long randomize_et_dyn(void)
 {
        unsigned long base;
 
-       base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
+       base = STACK_TOP / 3 * 2;
+       if (!is_32bit_task())
+               /* Align to 4GB */
+               base &= ~((1UL << 32) - 1);
        return base + mmap_rnd();
 }
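
For 64-bit tasks the ET_DYN base is now rounded down to a 4 GiB boundary by clearing the low 32 bits, before the per-exec random offset is added. A tiny worked example of the mask:

    #include <stdio.h>

    int main(void)
    {
            unsigned long base = 0x2aa3b1c45000UL;  /* example value */

            base &= ~((1UL << 32) - 1);             /* -> 0x2aa300000000 */
            printf("%#lx\n", base);
            return 0;
    }
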
 
index eb1cf898ed3cb51b30416380fc2109669c4b9f77..c2fb8a87dccb2990a794bb8960bfdad85eb9a390 100644 (file)
@@ -488,6 +488,22 @@ config X86_INTEL_MID
          Intel MID platforms are based on an Intel processor and chipset which
          consume less power than most of the x86 derivatives.
 
+config X86_INTEL_QUARK
+       bool "Intel Quark platform support"
+       depends on X86_32
+       depends on X86_EXTENDED_PLATFORM
+       depends on X86_PLATFORM_DEVICES
+       depends on X86_TSC
+       depends on PCI
+       depends on PCI_GOANY
+       depends on X86_IO_APIC
+       select IOSF_MBI
+       select INTEL_IMR
+       ---help---
+         Select to include support for the Quark X1000 SoC.
+         Say Y here if you have a Quark-based system such as the
+         Arduino-compatible Intel Galileo.
+
 config X86_INTEL_LPSS
        bool "Intel Low Power Subsystem Support"
        depends on ACPI
index 61bd2ad94281884f13b70f3bb9e397fdcef9338a..20028da8ae188ce16aaabdc1ef6da2fd1eb5b8f0 100644 (file)
@@ -313,6 +313,19 @@ config DEBUG_NMI_SELFTEST
 
          If unsure, say N.
 
+config DEBUG_IMR_SELFTEST
+       bool "Isolated Memory Region self test"
+       default n
+       depends on INTEL_IMR
+       ---help---
+         This option enables automated sanity testing of the IMR code.
+         Some simple tests are run to verify IMR bounds checking, alignment
+         and overlapping. This option is really only useful if you are
+         debugging an IMR memory map or are modifying the IMR code and want to
+         test your changes.
+
+         If unsure, say N here.
+
 config X86_DEBUG_STATIC_CPU_HAS
        bool "Debug alternatives"
        depends on DEBUG_KERNEL
index 36b62bc52638368c750c250a529995cd8cac80cb..95eba554baf9856f515d30150e66a5333ee04e18 100644 (file)
@@ -30,7 +30,7 @@ cflags-y += -ffreestanding
 # Disable unit-at-a-time mode on pre-gcc-4.0 compilers, it makes gcc use
 # a lot more stack due to the lack of sharing of stacklots.  Also, gcc
 # 4.3.0 needs -funit-at-a-time for extern inline functions.
-KBUILD_CFLAGS += $(shell if [ $(call cc-version) -lt 0400 ] ; then \
+KBUILD_CFLAGS += $(shell if [ $(cc-version) -lt 0400 ] ; then \
                        echo $(call cc-option,-fno-unit-at-a-time); \
                        else echo $(call cc-option,-funit-at-a-time); fi ;)
 
index 843feb3eb20bd781bf8df6cd2b004450eb2d1c5e..0a291cdfaf77100117baf53b2e3af75a43a8af4c 100644 (file)
@@ -51,6 +51,7 @@ $(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
 
 vmlinux-objs-$(CONFIG_EFI_STUB) += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o \
        $(objtree)/drivers/firmware/efi/libstub/lib.a
+vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
 
 $(obj)/vmlinux: $(vmlinux-objs-y) FORCE
        $(call if_changed,ld)
index bb1376381985edb9f96e49c0a1b0269e56bd0f9e..7083c16cccba0b2b144ea5e03e160ebafe81e855 100644 (file)
 static const char build_str[] = UTS_RELEASE " (" LINUX_COMPILE_BY "@"
                LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION;
 
+struct kaslr_setup_data {
+       __u64 next;
+       __u32 type;
+       __u32 len;
+       __u8 data[1];
+} kaslr_setup_data;
+
 #define I8254_PORT_CONTROL     0x43
 #define I8254_PORT_COUNTER0    0x40
 #define I8254_CMD_READBACK     0xC0
@@ -295,7 +302,29 @@ static unsigned long find_random_addr(unsigned long minimum,
        return slots_fetch_random();
 }
 
-unsigned char *choose_kernel_location(unsigned char *input,
+static void add_kaslr_setup_data(struct boot_params *params, __u8 enabled)
+{
+       struct setup_data *data;
+
+       kaslr_setup_data.type = SETUP_KASLR;
+       kaslr_setup_data.len = 1;
+       kaslr_setup_data.next = 0;
+       kaslr_setup_data.data[0] = enabled;
+
+       data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
+
+       while (data && data->next)
+               data = (struct setup_data *)(unsigned long)data->next;
+
+       if (data)
+               data->next = (unsigned long)&kaslr_setup_data;
+       else
+               params->hdr.setup_data = (unsigned long)&kaslr_setup_data;
+
+}
+
+unsigned char *choose_kernel_location(struct boot_params *params,
+                                     unsigned char *input,
                                      unsigned long input_size,
                                      unsigned char *output,
                                      unsigned long output_size)
@@ -306,14 +335,17 @@ unsigned char *choose_kernel_location(unsigned char *input,
 #ifdef CONFIG_HIBERNATION
        if (!cmdline_find_option_bool("kaslr")) {
                debug_putstr("KASLR disabled by default...\n");
+               add_kaslr_setup_data(params, 0);
                goto out;
        }
 #else
        if (cmdline_find_option_bool("nokaslr")) {
                debug_putstr("KASLR disabled by cmdline...\n");
+               add_kaslr_setup_data(params, 0);
                goto out;
        }
 #endif
+       add_kaslr_setup_data(params, 1);
 
        /* Record the various known unsafe memory ranges. */
        mem_avoid_init((unsigned long)input, input_size,
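The helper above appends a node to the singly linked setup_data list that boot_params carries to the kernel proper. A stripped-down sketch of the same append operation (illustrative only; it assumes the boot stub's identity mapping, where a physical address can be cast straight to a pointer):

struct setup_data_node {
	unsigned long long next;	/* physical address of the next node, 0 at the tail */
	unsigned int type;
	unsigned int len;
	unsigned char data[];
};

static void append_setup_data(unsigned long long *head, struct setup_data_node *node)
{
	unsigned long long *slot = head;

	/* follow 'next' links until the terminating zero entry */
	while (*slot)
		slot = &((struct setup_data_node *)(unsigned long)*slot)->next;

	node->next = 0;
	*slot = (unsigned long long)(unsigned long)node;
}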
index 7ff3632806b18ec9a48bd0ae88bdc7a9e9dbe091..99494dff2113e5ac63bcc77aaa58bb01b0506fbb 100644 (file)
@@ -3,28 +3,3 @@
 #include <asm/processor-flags.h>
 
 #include "../../platform/efi/efi_stub_64.S"
-
-#ifdef CONFIG_EFI_MIXED
-       .code64
-       .text
-ENTRY(efi64_thunk)
-       push    %rbp
-       push    %rbx
-
-       subq    $16, %rsp
-       leaq    efi_exit32(%rip), %rax
-       movl    %eax, 8(%rsp)
-       leaq    efi_gdt64(%rip), %rax
-       movl    %eax, 4(%rsp)
-       movl    %eax, 2(%rax)           /* Fixup the gdt base address */
-       leaq    efi32_boot_gdt(%rip), %rax
-       movl    %eax, (%rsp)
-
-       call    __efi64_thunk
-
-       addq    $16, %rsp
-       pop     %rbx
-       pop     %rbp
-       ret
-ENDPROC(efi64_thunk)
-#endif /* CONFIG_EFI_MIXED */
diff --git a/arch/x86/boot/compressed/efi_thunk_64.S b/arch/x86/boot/compressed/efi_thunk_64.S
new file mode 100644 (file)
index 0000000..630384a
--- /dev/null
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2014, 2015 Intel Corporation; author Matt Fleming
+ *
+ * Early support for invoking 32-bit EFI services from a 64-bit kernel.
+ *
+ * Because this thunking occurs before ExitBootServices() we have to
+ * restore the firmware's 32-bit GDT before we make EFI service calls,
+ * since the firmware's 32-bit IDT is still currently installed and it
+ * needs to be able to service interrupts.
+ *
+ * On the plus side, we don't have to worry about mangling 64-bit
+ * addresses into 32 bits because we're executing with an identity-
+ * mapped page table and haven't transitioned to 64-bit virtual addresses
+ * yet.
+ */
+
+#include <linux/linkage.h>
+#include <asm/msr.h>
+#include <asm/page_types.h>
+#include <asm/processor-flags.h>
+#include <asm/segment.h>
+
+       .code64
+       .text
+ENTRY(efi64_thunk)
+       push    %rbp
+       push    %rbx
+
+       subq    $8, %rsp
+       leaq    efi_exit32(%rip), %rax
+       movl    %eax, 4(%rsp)
+       leaq    efi_gdt64(%rip), %rax
+       movl    %eax, (%rsp)
+       movl    %eax, 2(%rax)           /* Fixup the gdt base address */
+
+       movl    %ds, %eax
+       push    %rax
+       movl    %es, %eax
+       push    %rax
+       movl    %ss, %eax
+       push    %rax
+
+       /*
+        * Convert x86-64 ABI params to i386 ABI
+        */
+       subq    $32, %rsp
+       movl    %esi, 0x0(%rsp)
+       movl    %edx, 0x4(%rsp)
+       movl    %ecx, 0x8(%rsp)
+       movq    %r8, %rsi
+       movl    %esi, 0xc(%rsp)
+       movq    %r9, %rsi
+       movl    %esi,  0x10(%rsp)
+
+       sgdt    save_gdt(%rip)
+
+       leaq    1f(%rip), %rbx
+       movq    %rbx, func_rt_ptr(%rip)
+
+       /*
+        * Switch to gdt with 32-bit segments. This is the firmware GDT
+        * that was installed when the kernel started executing. This
+        * pointer was saved at the EFI stub entry point in head_64.S.
+        */
+       leaq    efi32_boot_gdt(%rip), %rax
+       lgdt    (%rax)
+
+       pushq   $__KERNEL_CS
+       leaq    efi_enter32(%rip), %rax
+       pushq   %rax
+       lretq
+
+1:     addq    $32, %rsp
+
+       lgdt    save_gdt(%rip)
+
+       pop     %rbx
+       movl    %ebx, %ss
+       pop     %rbx
+       movl    %ebx, %es
+       pop     %rbx
+       movl    %ebx, %ds
+
+       /*
+        * Convert 32-bit status code into 64-bit.
+        */
+       test    %rax, %rax
+       jz      1f
+       movl    %eax, %ecx
+       andl    $0x0fffffff, %ecx
+       andl    $0xf0000000, %eax
+       shl     $32, %rax
+       or      %rcx, %rax
+1:
+       addq    $8, %rsp
+       pop     %rbx
+       pop     %rbp
+       ret
+ENDPROC(efi64_thunk)
+
+ENTRY(efi_exit32)
+       movq    func_rt_ptr(%rip), %rax
+       push    %rax
+       mov     %rdi, %rax
+       ret
+ENDPROC(efi_exit32)
+
+       .code32
+/*
+ * EFI service pointer must be in %edi.
+ *
+ * The stack should represent the 32-bit calling convention.
+ */
+ENTRY(efi_enter32)
+       movl    $__KERNEL_DS, %eax
+       movl    %eax, %ds
+       movl    %eax, %es
+       movl    %eax, %ss
+
+       /* Reload pgtables */
+       movl    %cr3, %eax
+       movl    %eax, %cr3
+
+       /* Disable paging */
+       movl    %cr0, %eax
+       btrl    $X86_CR0_PG_BIT, %eax
+       movl    %eax, %cr0
+
+       /* Disable long mode via EFER */
+       movl    $MSR_EFER, %ecx
+       rdmsr
+       btrl    $_EFER_LME, %eax
+       wrmsr
+
+       call    *%edi
+
+       /* We must preserve return value */
+       movl    %eax, %edi
+
+       /*
+        * Some firmware will return with interrupts enabled. Be sure to
+        * disable them before we switch GDTs.
+        */
+       cli
+
+       movl    56(%esp), %eax
+       movl    %eax, 2(%eax)
+       lgdtl   (%eax)
+
+       movl    %cr4, %eax
+       btsl    $(X86_CR4_PAE_BIT), %eax
+       movl    %eax, %cr4
+
+       movl    %cr3, %eax
+       movl    %eax, %cr3
+
+       movl    $MSR_EFER, %ecx
+       rdmsr
+       btsl    $_EFER_LME, %eax
+       wrmsr
+
+       xorl    %eax, %eax
+       lldt    %ax
+
+       movl    60(%esp), %eax
+       pushl   $__KERNEL_CS
+       pushl   %eax
+
+       /* Enable paging */
+       movl    %cr0, %eax
+       btsl    $X86_CR0_PG_BIT, %eax
+       movl    %eax, %cr0
+       lret
+ENDPROC(efi_enter32)
+
+       .data
+       .balign 8
+       .global efi32_boot_gdt
+efi32_boot_gdt:        .word   0
+               .quad   0
+
+save_gdt:      .word   0
+               .quad   0
+func_rt_ptr:   .quad   0
+
+       .global efi_gdt64
+efi_gdt64:
+       .word   efi_gdt64_end - efi_gdt64
+       .long   0                       /* Filled out by user */
+       .word   0
+       .quad   0x0000000000000000      /* NULL descriptor */
+       .quad   0x00af9a000000ffff      /* __KERNEL_CS */
+       .quad   0x00cf92000000ffff      /* __KERNEL_DS */
+       .quad   0x0080890000000000      /* TS descriptor */
+       .quad   0x0000000000000000      /* TS continued */
+efi_gdt64_end:
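The status fixup after the firmware call above widens a 32-bit EFI_STATUS into the 64-bit form: the top nibble, which carries the error bit, moves from bits 28-31 to bits 60-63 while the low 28 bits stay in place. A C sketch of the same transformation (for illustration only):

static unsigned long long efi32_to_efi64_status(unsigned int status32)
{
	unsigned long long lo = status32 & 0x0fffffffu;		/* low 28 status bits */
	unsigned long long hi = (unsigned long long)(status32 & 0xf0000000u) << 32;

	return status32 ? (hi | lo) : 0;	/* EFI_SUCCESS (0) passes through unchanged */
}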
index a950864a64dab3d558197c77bef3c56a07961494..5903089c818f6843b9d1cc7c83507cf0d28a3e26 100644 (file)
@@ -401,7 +401,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
         * the entire decompressed kernel plus relocation table, or the
         * entire decompressed kernel plus .bss and .brk sections.
         */
-       output = choose_kernel_location(input_data, input_len, output,
+       output = choose_kernel_location(real_mode, input_data, input_len,
+                                       output,
                                        output_len > run_size ? output_len
                                                              : run_size);
 
index 04477d68403f1fe6197d82276033ce27338c1bac..ee3576b2666b8139eedf25077ce769da10712c11 100644 (file)
@@ -57,7 +57,8 @@ int cmdline_find_option_bool(const char *option);
 
 #if CONFIG_RANDOMIZE_BASE
 /* aslr.c */
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *params,
+                                     unsigned char *input,
                                      unsigned long input_size,
                                      unsigned char *output,
                                      unsigned long output_size);
@@ -65,7 +66,8 @@ unsigned char *choose_kernel_location(unsigned char *input,
 bool has_cpuflag(int flag);
 #else
 static inline
-unsigned char *choose_kernel_location(unsigned char *input,
+unsigned char *choose_kernel_location(struct boot_params *params,
+                                     unsigned char *input,
                                      unsigned long input_size,
                                      unsigned char *output,
                                      unsigned long output_size)
index 92003f3c8a427b9138796ceef1b76bc237860da3..efc3b22d896eb23b7e37cf9c720065c0b6b0c717 100644 (file)
@@ -213,7 +213,15 @@ void register_lapic_address(unsigned long address);
 extern void setup_boot_APIC_clock(void);
 extern void setup_secondary_APIC_clock(void);
 extern int APIC_init_uniprocessor(void);
+
+#ifdef CONFIG_X86_64
+static inline int apic_force_enable(unsigned long addr)
+{
+       return -1;
+}
+#else
 extern int apic_force_enable(unsigned long addr);
+#endif
 
 extern int apic_bsp_setup(bool upmode);
 extern void apic_ap_setup(void);
diff --git a/arch/x86/include/asm/imr.h b/arch/x86/include/asm/imr.h
new file mode 100644 (file)
index 0000000..cd2ce40
--- /dev/null
@@ -0,0 +1,60 @@
+/*
+ * imr.h: Isolated Memory Region API
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+#ifndef _IMR_H
+#define _IMR_H
+
+#include <linux/types.h>
+
+/*
+ * IMR agent access mask bits
+ * See section 12.7.4.7 from quark-x1000-datasheet.pdf for register
+ * definitions.
+ */
+#define IMR_ESRAM_FLUSH                BIT(31)
+#define IMR_CPU_SNOOP          BIT(30)         /* Applicable only to write */
+#define IMR_RMU                        BIT(29)
+#define IMR_VC1_SAI_ID3                BIT(15)
+#define IMR_VC1_SAI_ID2                BIT(14)
+#define IMR_VC1_SAI_ID1                BIT(13)
+#define IMR_VC1_SAI_ID0                BIT(12)
+#define IMR_VC0_SAI_ID3                BIT(11)
+#define IMR_VC0_SAI_ID2                BIT(10)
+#define IMR_VC0_SAI_ID1                BIT(9)
+#define IMR_VC0_SAI_ID0                BIT(8)
+#define IMR_CPU_0              BIT(1)          /* SMM mode */
+#define IMR_CPU                        BIT(0)          /* Non SMM mode */
+#define IMR_ACCESS_NONE                0
+
+/*
+ * Read/Write access-all bits here include some reserved bits
+ * These are the values firmware uses and are accepted by hardware.
+ * The kernel defines read/write access-all in the same way as firmware
+ * in order to have a consistent and crisp definition across firmware,
+ * bootloader and kernel.
+ */
+#define IMR_READ_ACCESS_ALL    0xBFFFFFFF
+#define IMR_WRITE_ACCESS_ALL   0xFFFFFFFF
+
+/* Number of IMRs provided by Quark X1000 SoC */
+#define QUARK_X1000_IMR_MAX    0x08
+#define QUARK_X1000_IMR_REGBASE 0x40
+
+/* IMR alignment bits - only bits 31:10 are checked for IMR validity */
+#define IMR_ALIGN              0x400
+#define IMR_MASK               (IMR_ALIGN - 1)
+
+int imr_add_range(phys_addr_t base, size_t size,
+                 unsigned int rmask, unsigned int wmask, bool lock);
+
+int imr_remove_range(phys_addr_t base, size_t size);
+
+#endif /* _IMR_H */
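A sketch of how a driver might use the API declared above (illustrative only, not part of the patch): protect a physically contiguous, IMR_ALIGN-aligned buffer so that only non-SMM CPU accesses are allowed, then remove the region again.

#include <asm/imr.h>

static int example_protect_buffer(phys_addr_t base, size_t size)
{
	int ret;

	/* allow CPU (non-SMM) reads and writes only; leave the IMR unlocked */
	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
	if (ret)
		return ret;

	/* ... the buffer is now fenced off from other system agents ... */

	return imr_remove_range(base, size);
}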
index f97fbe3abb67f5059d4e6f0a37261d6113df19de..95e11f79f123c6aadd5ed8888a51e1870405deeb 100644 (file)
@@ -51,6 +51,8 @@ extern int devmem_is_allowed(unsigned long pagenr);
 extern unsigned long max_low_pfn_mapped;
 extern unsigned long max_pfn_mapped;
 
+extern bool kaslr_enabled;
+
 static inline phys_addr_t get_max_mapped(void)
 {
        return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
index 67fc3d2b0aabe6e7b5a5af631b80bf9e8c25a46b..a0c35bf6cb92cf95abe8fc9ffe9f75c4f34b1fca 100644 (file)
@@ -476,12 +476,14 @@ static inline int pmd_present(pmd_t pmd)
  */
 static inline int pte_protnone(pte_t pte)
 {
-       return pte_flags(pte) & _PAGE_PROTNONE;
+       return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
+               == _PAGE_PROTNONE;
 }
 
 static inline int pmd_protnone(pmd_t pmd)
 {
-       return pmd_flags(pmd) & _PAGE_PROTNONE;
+       return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
+               == _PAGE_PROTNONE;
 }
 #endif /* CONFIG_NUMA_BALANCING */
 
index 7050d864f5207c4fb384672b083a18a6584bdbbe..cf87de3fc39000eb21028ab2597d5187978bda4a 100644 (file)
@@ -46,7 +46,7 @@ static __always_inline bool static_key_false(struct static_key *key);
 
 static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
 {
-       set_bit(0, (volatile unsigned long *)&lock->tickets.tail);
+       set_bit(0, (volatile unsigned long *)&lock->tickets.head);
 }
 
 #else  /* !CONFIG_PARAVIRT_SPINLOCKS */
@@ -60,10 +60,30 @@ static inline void __ticket_unlock_kick(arch_spinlock_t *lock,
 }
 
 #endif /* CONFIG_PARAVIRT_SPINLOCKS */
+static inline int  __tickets_equal(__ticket_t one, __ticket_t two)
+{
+       return !((one ^ two) & ~TICKET_SLOWPATH_FLAG);
+}
+
+static inline void __ticket_check_and_clear_slowpath(arch_spinlock_t *lock,
+                                                       __ticket_t head)
+{
+       if (head & TICKET_SLOWPATH_FLAG) {
+               arch_spinlock_t old, new;
+
+               old.tickets.head = head;
+               new.tickets.head = head & ~TICKET_SLOWPATH_FLAG;
+               old.tickets.tail = new.tickets.head + TICKET_LOCK_INC;
+               new.tickets.tail = old.tickets.tail;
+
+               /* try to clear slowpath flag when there are no contenders */
+               cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
+       }
+}
 
 static __always_inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 {
-       return lock.tickets.head == lock.tickets.tail;
+       return __tickets_equal(lock.tickets.head, lock.tickets.tail);
 }
 
 /*
@@ -87,18 +107,21 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
        if (likely(inc.head == inc.tail))
                goto out;
 
-       inc.tail &= ~TICKET_SLOWPATH_FLAG;
        for (;;) {
                unsigned count = SPIN_THRESHOLD;
 
                do {
-                       if (READ_ONCE(lock->tickets.head) == inc.tail)
-                               goto out;
+                       inc.head = READ_ONCE(lock->tickets.head);
+                       if (__tickets_equal(inc.head, inc.tail))
+                               goto clear_slowpath;
                        cpu_relax();
                } while (--count);
                __ticket_lock_spinning(lock, inc.tail);
        }
-out:   barrier();      /* make sure nothing creeps before the lock is taken */
+clear_slowpath:
+       __ticket_check_and_clear_slowpath(lock, inc.head);
+out:
+       barrier();      /* make sure nothing creeps before the lock is taken */
 }
 
 static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -106,56 +129,30 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
        arch_spinlock_t old, new;
 
        old.tickets = READ_ONCE(lock->tickets);
-       if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
+       if (!__tickets_equal(old.tickets.head, old.tickets.tail))
                return 0;
 
        new.head_tail = old.head_tail + (TICKET_LOCK_INC << TICKET_SHIFT);
+       new.head_tail &= ~TICKET_SLOWPATH_FLAG;
 
        /* cmpxchg is a full barrier, so nothing can move before it */
        return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }
 
-static inline void __ticket_unlock_slowpath(arch_spinlock_t *lock,
-                                           arch_spinlock_t old)
-{
-       arch_spinlock_t new;
-
-       BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
-
-       /* Perform the unlock on the "before" copy */
-       old.tickets.head += TICKET_LOCK_INC;
-
-       /* Clear the slowpath flag */
-       new.head_tail = old.head_tail & ~(TICKET_SLOWPATH_FLAG << TICKET_SHIFT);
-
-       /*
-        * If the lock is uncontended, clear the flag - use cmpxchg in
-        * case it changes behind our back though.
-        */
-       if (new.tickets.head != new.tickets.tail ||
-           cmpxchg(&lock->head_tail, old.head_tail,
-                                       new.head_tail) != old.head_tail) {
-               /*
-                * Lock still has someone queued for it, so wake up an
-                * appropriate waiter.
-                */
-               __ticket_unlock_kick(lock, old.tickets.head);
-       }
-}
-
 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
        if (TICKET_SLOWPATH_FLAG &&
-           static_key_false(&paravirt_ticketlocks_enabled)) {
-               arch_spinlock_t prev;
+               static_key_false(&paravirt_ticketlocks_enabled)) {
+               __ticket_t head;
 
-               prev = *lock;
-               add_smp(&lock->tickets.head, TICKET_LOCK_INC);
+               BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
 
-               /* add_smp() is a full mb() */
+               head = xadd(&lock->tickets.head, TICKET_LOCK_INC);
 
-               if (unlikely(lock->tickets.tail & TICKET_SLOWPATH_FLAG))
-                       __ticket_unlock_slowpath(lock, prev);
+               if (unlikely(head & TICKET_SLOWPATH_FLAG)) {
+                       head &= ~TICKET_SLOWPATH_FLAG;
+                       __ticket_unlock_kick(lock, (head + TICKET_LOCK_INC));
+               }
        } else
                __add(&lock->tickets.head, TICKET_LOCK_INC, UNLOCK_LOCK_PREFIX);
 }
@@ -164,14 +161,15 @@ static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
        struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
-       return tmp.tail != tmp.head;
+       return !__tickets_equal(tmp.tail, tmp.head);
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
        struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
-       return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
+       tmp.head &= ~TICKET_SLOWPATH_FLAG;
+       return (tmp.tail - tmp.head) > TICKET_LOCK_INC;
 }
 #define arch_spin_is_contended arch_spin_is_contended
 
@@ -191,8 +189,8 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
                 * We need to check "unlocked" in a loop, tmp.head == head
                 * can be false positive because of overflow.
                 */
-               if (tmp.head == (tmp.tail & ~TICKET_SLOWPATH_FLAG) ||
-                   tmp.head != head)
+               if (__tickets_equal(tmp.head, tmp.tail) ||
+                               !__tickets_equal(tmp.head, head))
                        break;
 
                cpu_relax();
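Since the slowpath flag now lives in the head ticket, every head/tail comparison has to mask it out, which is exactly what __tickets_equal() above does. A toy user-space model of that comparison (illustrative values; in the kernel the flag is bit 0 and tickets advance by TICKET_LOCK_INC):

#include <stdio.h>

typedef unsigned short ticket_t;
#define SLOWPATH_FLAG ((ticket_t)1)

static int tickets_equal(ticket_t one, ticket_t two)
{
	return !((one ^ two) & ~SLOWPATH_FLAG);
}

int main(void)
{
	ticket_t head = 8 | SLOWPATH_FLAG;	/* a waiter flagged the slowpath */
	ticket_t tail = 8;

	/* still reported as equal, so the lock is correctly seen as uncontended */
	printf("%d\n", tickets_equal(head, tail));
	return 0;
}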
index 225b0988043a0a78ac9092a9af7a265122c685cd..44e6dd7e36a23becd48def85b218b1d70ac938e6 100644 (file)
@@ -7,6 +7,7 @@
 #define SETUP_DTB                      2
 #define SETUP_PCI                      3
 #define SETUP_EFI                      4
+#define SETUP_KASLR                    5
 
 /* ram_size flags */
 #define RAMDISK_IMAGE_START_MASK       0x07FF
index ae97ed0873c6e3f35e28545b521dda9c25c8d74e..3d525c6124f6c720e02d2761b283151be97cf6d7 100644 (file)
@@ -613,6 +613,11 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
 {
        int rc, irq, trigger, polarity;
 
+       if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
+               *irqp = gsi;
+               return 0;
+       }
+
        rc = acpi_get_override_irq(gsi, &trigger, &polarity);
        if (rc == 0) {
                trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
index c6826d1e8082584268d1b5e8f3abeb176ee61187..746e7fd08aad7082747ee1d9dca80570f1a00e3b 100644 (file)
@@ -196,6 +196,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
                struct microcode_header_intel mc_header;
                unsigned int mc_size;
 
+               if (leftover < sizeof(mc_header)) {
+                       pr_err("error! Truncated header in microcode data file\n");
+                       break;
+               }
+
                if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
                        break;
 
index ec9df6f9cd47b35e7f4d6059eb094f922420c5cf..420eb933189ca487110607475ddbf33be8e8267b 100644 (file)
@@ -321,7 +321,11 @@ get_matching_model_microcode(int cpu, unsigned long start,
        unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
        int i;
 
-       while (leftover) {
+       while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
+
+               if (leftover < sizeof(mc_header))
+                       break;
+
                mc_header = (struct microcode_header_intel *)ucode_ptr;
 
                mc_size = get_totalsize(mc_header);
index 705ef8d48e2dc464936672fb54eea908f8f03b4e..67b1cbe0093adba1141f8d9ebda29ad34dc9d23e 100644 (file)
@@ -302,6 +302,9 @@ int check_irq_vectors_for_cpu_disable(void)
                irq = __this_cpu_read(vector_irq[vector]);
                if (irq >= 0) {
                        desc = irq_to_desc(irq);
+                       if (!desc)
+                               continue;
+
                        data = irq_desc_get_irq_data(desc);
                        cpumask_copy(&affinity_new, data->affinity);
                        cpu_clear(this_cpu, affinity_new);
index 98f654d466e585167153e58811902675bfeb5baa..6a1146ea4d4d885dfa197e4d6aea6562c5d05c7e 100644 (file)
@@ -84,7 +84,7 @@ static volatile u32 twobyte_is_boostable[256 / 32] = {
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f          */
        /*      ----------------------------------------------          */
        W(0x00, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0) | /* 00 */
-       W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 10 */
+       W(0x10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1) , /* 10 */
        W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 20 */
        W(0x30, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
        W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
index 94f6434843008c38b8a091c077e0c94fc36c9a07..e354cc6446aba4286645dc799e48d8539c7005d6 100644 (file)
@@ -609,7 +609,7 @@ static inline void check_zero(void)
        u8 ret;
        u8 old;
 
-       old = ACCESS_ONCE(zero_stats);
+       old = READ_ONCE(zero_stats);
        if (unlikely(old)) {
                ret = cmpxchg(&zero_stats, old, 0);
                /* This ensures only one fellow resets the stat */
@@ -727,6 +727,7 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
        int cpu;
        u64 start;
        unsigned long flags;
+       __ticket_t head;
 
        if (in_nmi())
                return;
@@ -768,11 +769,15 @@ __visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
         */
        __ticket_enter_slowpath(lock);
 
+       /* make sure enter_slowpath, which is atomic does not cross the read */
+       smp_mb__after_atomic();
+
        /*
         * check again make sure it didn't become free while
         * we weren't looking.
         */
-       if (ACCESS_ONCE(lock->tickets.head) == want) {
+       head = READ_ONCE(lock->tickets.head);
+       if (__tickets_equal(head, want)) {
                add_stats(TAKEN_SLOW_PICKUP, 1);
                goto out;
        }
@@ -803,8 +808,8 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
        add_stats(RELEASED_SLOW, 1);
        for_each_cpu(cpu, &waiting_cpus) {
                const struct kvm_lock_waiting *w = &per_cpu(klock_waiting, cpu);
-               if (ACCESS_ONCE(w->lock) == lock &&
-                   ACCESS_ONCE(w->want) == ticket) {
+               if (READ_ONCE(w->lock) == lock &&
+                   READ_ONCE(w->want) == ticket) {
                        add_stats(RELEASED_SLOW_KICKED, 1);
                        kvm_kick_cpu(cpu);
                        break;
index d1ac80b72c72184a0b999c2b299b5e265d26de7a..9bbb9b35c144a4f721ed4e7dcfc07aee6abdc2cb 100644 (file)
@@ -47,21 +47,13 @@ do {                                                        \
 
 #ifdef CONFIG_RANDOMIZE_BASE
 static unsigned long module_load_offset;
-static int randomize_modules = 1;
 
 /* Mutex protects the module_load_offset. */
 static DEFINE_MUTEX(module_kaslr_mutex);
 
-static int __init parse_nokaslr(char *p)
-{
-       randomize_modules = 0;
-       return 0;
-}
-early_param("nokaslr", parse_nokaslr);
-
 static unsigned long int get_module_load_offset(void)
 {
-       if (randomize_modules) {
+       if (kaslr_enabled) {
                mutex_lock(&module_kaslr_mutex);
                /*
                 * Calculate the module_load_offset the first time this
index 0a2421cca01fad095bbb7caa8e7c779d910d751b..98dc9317286e1e0fad25f3d10efaa3134a9134c0 100644 (file)
 unsigned long max_low_pfn_mapped;
 unsigned long max_pfn_mapped;
 
+bool __read_mostly kaslr_enabled = false;
+
 #ifdef CONFIG_DMI
 RESERVE_BRK(dmi_alloc, 65536);
 #endif
@@ -425,6 +427,11 @@ static void __init reserve_initrd(void)
 }
 #endif /* CONFIG_BLK_DEV_INITRD */
 
+static void __init parse_kaslr_setup(u64 pa_data, u32 data_len)
+{
+       kaslr_enabled = (bool)(pa_data + sizeof(struct setup_data));
+}
+
 static void __init parse_setup_data(void)
 {
        struct setup_data *data;
@@ -450,6 +457,9 @@ static void __init parse_setup_data(void)
                case SETUP_EFI:
                        parse_efi_setup(pa_data, data_len);
                        break;
+               case SETUP_KASLR:
+                       parse_kaslr_setup(pa_data, data_len);
+                       break;
                default:
                        break;
                }
@@ -832,10 +842,14 @@ static void __init trim_low_memory_range(void)
 static int
 dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
 {
-       pr_emerg("Kernel Offset: 0x%lx from 0x%lx "
-                "(relocation range: 0x%lx-0x%lx)\n",
-                (unsigned long)&_text - __START_KERNEL, __START_KERNEL,
-                __START_KERNEL_map, MODULES_VADDR-1);
+       if (kaslr_enabled)
+               pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
+                        (unsigned long)&_text - __START_KERNEL,
+                        __START_KERNEL,
+                        __START_KERNEL_map,
+                        MODULES_VADDR-1);
+       else
+               pr_emerg("Kernel Offset: disabled\n");
 
        return 0;
 }
index 8b96a947021ffe0ad3d05397d3fc44b975a98141..81f8adb0679e548d31af5982297c85141e3693f9 100644 (file)
  * Good-instruction tables for 32-bit apps.  This is non-const and volatile
  * to keep gcc from statically optimizing it out, as variable_test_bit makes
  * some versions of gcc to think only *(unsigned long*) is used.
+ *
+ * Opcodes we'll probably never support:
+ * 6c-6f - ins,outs. SEGVs if used in userspace
+ * e4-e7 - in,out imm. SEGVs if used in userspace
+ * ec-ef - in,out acc. SEGVs if used in userspace
+ * cc - int3. SIGTRAP if used in userspace
+ * ce - into. Not used in userspace - no kernel support to make it useful. SEGVs
+ *     (why we support bound (62) then? it's similar, and similarly unused...)
+ * f1 - int1. SIGTRAP if used in userspace
+ * f4 - hlt. SEGVs if used in userspace
+ * fa - cli. SEGVs if used in userspace
+ * fb - sti. SEGVs if used in userspace
+ *
+ * Opcodes which need some work to be supported:
+ * 07,17,1f - pop es/ss/ds
+ *     Normally not used in userspace, but would execute if used.
+ *     Can cause GP or stack exception if tries to load wrong segment descriptor.
+ *     We hesitate to run them under single step since kernel's handling
+ *     of userspace single-stepping (TF flag) is fragile.
+ *     We can easily refuse to support push es/cs/ss/ds (06/0e/16/1e)
+ *     on the same grounds that they are never used.
+ * cd - int N.
+ *     Used by userspace for "int 80" syscall entry. (Other "int N"
+ *     cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
+ *     Not supported since kernel's handling of userspace single-stepping
+ *     (TF flag) is fragile.
+ * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
  */
 #if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
 static volatile u32 good_insns_32[256 / 32] = {
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
        /*      ----------------------------------------------         */
-       W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 00 */
+       W(0x00, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 00 */
        W(0x10, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 10 */
-       W(0x20, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* 20 */
-       W(0x30, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1) , /* 30 */
+       W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
+       W(0x30, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
        W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
        W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
-       W(0x60, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
+       W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
        W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
        W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
        W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
        W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
        W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
        W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
-       W(0xd0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
+       W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
        W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
-       W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
+       W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
        /*      ----------------------------------------------         */
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 };
@@ -94,27 +121,61 @@ static volatile u32 good_insns_32[256 / 32] = {
 #define good_insns_32  NULL
 #endif
 
-/* Good-instruction tables for 64-bit apps */
+/* Good-instruction tables for 64-bit apps.
+ *
+ * Genuinely invalid opcodes:
+ * 06,07 - formerly push/pop es
+ * 0e - formerly push cs
+ * 16,17 - formerly push/pop ss
+ * 1e,1f - formerly push/pop ds
+ * 27,2f,37,3f - formerly daa/das/aaa/aas
+ * 60,61 - formerly pusha/popa
+ * 62 - formerly bound. EVEX prefix for AVX512 (not yet supported)
+ * 82 - formerly redundant encoding of Group1
+ * 9a - formerly call seg:ofs
+ * ce - formerly into
+ * d4,d5 - formerly aam/aad
+ * d6 - formerly undocumented salc
+ * ea - formerly jmp seg:ofs
+ *
+ * Opcodes we'll probably never support:
+ * 6c-6f - ins,outs. SEGVs if used in userspace
+ * e4-e7 - in,out imm. SEGVs if used in userspace
+ * ec-ef - in,out acc. SEGVs if used in userspace
+ * cc - int3. SIGTRAP if used in userspace
+ * f1 - int1. SIGTRAP if used in userspace
+ * f4 - hlt. SEGVs if used in userspace
+ * fa - cli. SEGVs if used in userspace
+ * fb - sti. SEGVs if used in userspace
+ *
+ * Opcodes which need some work to be supported:
+ * cd - int N.
+ *     Used by userspace for "int 80" syscall entry. (Other "int N"
+ *     cause GP -> SEGV since their IDT gates don't allow calls from CPL 3).
+ *     Not supported since kernel's handling of userspace single-stepping
+ *     (TF flag) is fragile.
+ * cf - iret. Normally not used in userspace. Doesn't SEGV unless arguments are bad
+ */
 #if defined(CONFIG_X86_64)
 static volatile u32 good_insns_64[256 / 32] = {
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
        /*      ----------------------------------------------         */
-       W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 00 */
+       W(0x00, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* 00 */
        W(0x10, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 10 */
-       W(0x20, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) | /* 20 */
-       W(0x30, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0) , /* 30 */
-       W(0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) | /* 40 */
+       W(0x20, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) | /* 20 */
+       W(0x30, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0) , /* 30 */
+       W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
        W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
-       W(0x60, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
+       W(0x60, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* 60 */
        W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 70 */
        W(0x80, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
-       W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
+       W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1) , /* 90 */
        W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* a0 */
        W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
-       W(0xc0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
+       W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0) | /* c0 */
        W(0xd0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
-       W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0) | /* e0 */
-       W(0xf0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
+       W(0xe0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0) | /* e0 */
+       W(0xf0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1)   /* f0 */
        /*      ----------------------------------------------         */
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 };
@@ -122,49 +183,55 @@ static volatile u32 good_insns_64[256 / 32] = {
 #define good_insns_64  NULL
 #endif
 
-/* Using this for both 64-bit and 32-bit apps */
+/* Using this for both 64-bit and 32-bit apps.
+ * Opcodes we don't support:
+ * 0f 00 - SLDT/STR/LLDT/LTR/VERR/VERW/-/- group. System insns
+ * 0f 01 - SGDT/SIDT/LGDT/LIDT/SMSW/-/LMSW/INVLPG group.
+ *     Also encodes tons of other system insns if mod=11.
+ *     Some are in fact non-system: xend, xtest, rdtscp, maybe more
+ * 0f 05 - syscall
+ * 0f 06 - clts (CPL0 insn)
+ * 0f 07 - sysret
+ * 0f 08 - invd (CPL0 insn)
+ * 0f 09 - wbinvd (CPL0 insn)
+ * 0f 0b - ud2
+ * 0f 30 - wrmsr (CPL0 insn) (then why rdmsr is allowed, it's also CPL0 insn?)
+ * 0f 34 - sysenter
+ * 0f 35 - sysexit
+ * 0f 37 - getsec
+ * 0f 78 - vmread (Intel VMX. CPL0 insn)
+ * 0f 79 - vmwrite (Intel VMX. CPL0 insn)
+ *     Note: with prefixes, these two opcodes are
+ *     extrq/insertq/AVX512 convert vector ops.
+ * 0f ae - group15: [f]xsave,[f]xrstor,[v]{ld,st}mxcsr,clflush[opt],
+ *     {rd,wr}{fs,gs}base,{s,l,m}fence.
+ *     Why? They are all user-executable.
+ */
 static volatile u32 good_2byte_insns[256 / 32] = {
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
        /*      ----------------------------------------------         */
-       W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */
-       W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
-       W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
-       W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */
+       W(0x00, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1) | /* 00 */
+       W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 10 */
+       W(0x20, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */
+       W(0x30, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1) , /* 30 */
        W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */
        W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */
        W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */
-       W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */
+       W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1) , /* 70 */
        W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */
        W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */
-       W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
-       W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
+       W(0xa0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */
+       W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* b0 */
        W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */
-       W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
+       W(0xd0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */
        W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */
-       W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0)   /* f0 */
+       W(0xf0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)   /* f0 */
        /*      ----------------------------------------------         */
        /*      0  1  2  3  4  5  6  7  8  9  a  b  c  d  e  f         */
 };
 #undef W
 
 /*
- * opcodes we'll probably never support:
- *
- *  6c-6d, e4-e5, ec-ed - in
- *  6e-6f, e6-e7, ee-ef - out
- *  cc, cd - int3, int
- *  cf - iret
- *  d6 - illegal instruction
- *  f1 - int1/icebp
- *  f4 - hlt
- *  fa, fb - cli, sti
- *  0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2
- *
- * invalid opcodes in 64-bit mode:
- *
- *  06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5
- *  63 - we support this opcode in x86_64 but not in i386.
- *
  * opcodes we may need to refine support for:
  *
  *  0f - 2-byte instructions: For many of these instructions, the validity
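For reference, each table above packs one validity bit per opcode into u32 words, 32 opcodes per word, so a lookup is a simple shift and mask. A sketch of the query (illustrative helper; the kernel itself just calls test_bit() on the table):

static int insn_is_good(unsigned char opcode, const volatile unsigned int *table)
{
	/* word index = opcode / 32, bit within the word = opcode % 32 */
	return (table[opcode / 32] >> (opcode % 32)) & 1;
}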
index 553c094b9cd7984b7334a95122931a93249f1ddf..a110efca6d068f7d881f8c1d955d8a6906457f64 100644 (file)
@@ -238,6 +238,31 @@ static void __init_refok adjust_range_page_size_mask(struct map_range *mr,
        }
 }
 
+static const char *page_size_string(struct map_range *mr)
+{
+       static const char str_1g[] = "1G";
+       static const char str_2m[] = "2M";
+       static const char str_4m[] = "4M";
+       static const char str_4k[] = "4k";
+
+       if (mr->page_size_mask & (1<<PG_LEVEL_1G))
+               return str_1g;
+       /*
+        * 32-bit without PAE has a 4M large page size.
+        * PG_LEVEL_2M is misnamed, but we can at least
+        * print out the right size in the string.
+        */
+       if (IS_ENABLED(CONFIG_X86_32) &&
+           !IS_ENABLED(CONFIG_X86_PAE) &&
+           mr->page_size_mask & (1<<PG_LEVEL_2M))
+               return str_4m;
+
+       if (mr->page_size_mask & (1<<PG_LEVEL_2M))
+               return str_2m;
+
+       return str_4k;
+}
+
 static int __meminit split_mem_range(struct map_range *mr, int nr_range,
                                     unsigned long start,
                                     unsigned long end)
@@ -333,8 +358,7 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
        for (i = 0; i < nr_range; i++)
                printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
                                mr[i].start, mr[i].end - 1,
-                       (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
-                        (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
+                               page_size_string(&mr[i]));
 
        return nr_range;
 }
index 919b91205cd4be57760c50956eddb2d02dc13c45..df4552bd239e03b4a02e6505454e41420d530461 100644 (file)
@@ -35,12 +35,12 @@ struct va_alignment __read_mostly va_align = {
        .flags = -1,
 };
 
-static unsigned int stack_maxrandom_size(void)
+static unsigned long stack_maxrandom_size(void)
 {
-       unsigned int max = 0;
+       unsigned long max = 0;
        if ((current->flags & PF_RANDOMIZE) &&
                !(current->personality & ADDR_NO_RANDOMIZE)) {
-               max = ((-1U) & STACK_RND_MASK) << PAGE_SHIFT;
+               max = ((-1UL) & STACK_RND_MASK) << PAGE_SHIFT;
        }
 
        return max;
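The switch from unsigned int to unsigned long matters because, assuming the usual x86-64 values of STACK_RND_MASK (0x3fffff) and PAGE_SHIFT (12), the maximum stack randomization is a 34-bit quantity and was silently truncated in 32-bit arithmetic. A quick illustration (on an LP64 system):

#include <stdio.h>

int main(void)
{
	unsigned int  truncated = 0x3fffffu  << 12;	/* wraps to 0xfffff000 */
	unsigned long correct   = 0x3fffffUL << 12;	/* 0x3fffff000 */

	printf("u32: %#x  u64: %#lx\n", truncated, correct);
	return 0;
}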
index 85afde1fa3e5f6465e40a0599e17669a934b88b9..a62e0be3a2f1b4f563ab5f6fda0acb59612ea523 100644 (file)
@@ -5,6 +5,7 @@ obj-y   += geode/
 obj-y  += goldfish/
 obj-y  += iris/
 obj-y  += intel-mid/
+obj-y  += intel-quark/
 obj-y  += olpc/
 obj-y  += scx200/
 obj-y  += sfi/
index 5fcda7272550a79b52660030571adeaf9d0f197b..86d0f9e08dd95eb1023d5ac7ec4fb006aafb72c9 100644 (file)
@@ -91,167 +91,6 @@ ENTRY(efi_call)
        ret
 ENDPROC(efi_call)
 
-#ifdef CONFIG_EFI_MIXED
-
-/*
- * We run this function from the 1:1 mapping.
- *
- * This function must be invoked with a 1:1 mapped stack.
- */
-ENTRY(__efi64_thunk)
-       movl    %ds, %eax
-       push    %rax
-       movl    %es, %eax
-       push    %rax
-       movl    %ss, %eax
-       push    %rax
-
-       subq    $32, %rsp
-       movl    %esi, 0x0(%rsp)
-       movl    %edx, 0x4(%rsp)
-       movl    %ecx, 0x8(%rsp)
-       movq    %r8, %rsi
-       movl    %esi, 0xc(%rsp)
-       movq    %r9, %rsi
-       movl    %esi,  0x10(%rsp)
-
-       sgdt    save_gdt(%rip)
-
-       leaq    1f(%rip), %rbx
-       movq    %rbx, func_rt_ptr(%rip)
-
-       /* Switch to gdt with 32-bit segments */
-       movl    64(%rsp), %eax
-       lgdt    (%rax)
-
-       leaq    efi_enter32(%rip), %rax
-       pushq   $__KERNEL_CS
-       pushq   %rax
-       lretq
-
-1:     addq    $32, %rsp
-
-       lgdt    save_gdt(%rip)
-
-       pop     %rbx
-       movl    %ebx, %ss
-       pop     %rbx
-       movl    %ebx, %es
-       pop     %rbx
-       movl    %ebx, %ds
-
-       /*
-        * Convert 32-bit status code into 64-bit.
-        */
-       test    %rax, %rax
-       jz      1f
-       movl    %eax, %ecx
-       andl    $0x0fffffff, %ecx
-       andl    $0xf0000000, %eax
-       shl     $32, %rax
-       or      %rcx, %rax
-1:
-       ret
-ENDPROC(__efi64_thunk)
-
-ENTRY(efi_exit32)
-       movq    func_rt_ptr(%rip), %rax
-       push    %rax
-       mov     %rdi, %rax
-       ret
-ENDPROC(efi_exit32)
-
-       .code32
-/*
- * EFI service pointer must be in %edi.
- *
- * The stack should represent the 32-bit calling convention.
- */
-ENTRY(efi_enter32)
-       movl    $__KERNEL_DS, %eax
-       movl    %eax, %ds
-       movl    %eax, %es
-       movl    %eax, %ss
-
-       /* Reload pgtables */
-       movl    %cr3, %eax
-       movl    %eax, %cr3
-
-       /* Disable paging */
-       movl    %cr0, %eax
-       btrl    $X86_CR0_PG_BIT, %eax
-       movl    %eax, %cr0
-
-       /* Disable long mode via EFER */
-       movl    $MSR_EFER, %ecx
-       rdmsr
-       btrl    $_EFER_LME, %eax
-       wrmsr
-
-       call    *%edi
-
-       /* We must preserve return value */
-       movl    %eax, %edi
-
-       /*
-        * Some firmware will return with interrupts enabled. Be sure to
-        * disable them before we switch GDTs.
-        */
-       cli
-
-       movl    68(%esp), %eax
-       movl    %eax, 2(%eax)
-       lgdtl   (%eax)
-
-       movl    %cr4, %eax
-       btsl    $(X86_CR4_PAE_BIT), %eax
-       movl    %eax, %cr4
-
-       movl    %cr3, %eax
-       movl    %eax, %cr3
-
-       movl    $MSR_EFER, %ecx
-       rdmsr
-       btsl    $_EFER_LME, %eax
-       wrmsr
-
-       xorl    %eax, %eax
-       lldt    %ax
-
-       movl    72(%esp), %eax
-       pushl   $__KERNEL_CS
-       pushl   %eax
-
-       /* Enable paging */
-       movl    %cr0, %eax
-       btsl    $X86_CR0_PG_BIT, %eax
-       movl    %eax, %cr0
-       lret
-ENDPROC(efi_enter32)
-
-       .data
-       .balign 8
-       .global efi32_boot_gdt
-efi32_boot_gdt:        .word   0
-               .quad   0
-
-save_gdt:      .word   0
-               .quad   0
-func_rt_ptr:   .quad   0
-
-       .global efi_gdt64
-efi_gdt64:
-       .word   efi_gdt64_end - efi_gdt64
-       .long   0                       /* Filled out by user */
-       .word   0
-       .quad   0x0000000000000000      /* NULL descriptor */
-       .quad   0x00af9a000000ffff      /* __KERNEL_CS */
-       .quad   0x00cf92000000ffff      /* __KERNEL_DS */
-       .quad   0x0080890000000000      /* TS descriptor */
-       .quad   0x0000000000000000      /* TS continued */
-efi_gdt64_end:
-#endif /* CONFIG_EFI_MIXED */
-
        .data
 ENTRY(efi_scratch)
        .fill 3,8,0
index 8806fa73e6e6d22337ff69c708583adf3c38cbbf..ff85d28c50f261c728eb0731391706b1af3777af 100644 (file)
@@ -1,9 +1,26 @@
 /*
  * Copyright (C) 2014 Intel Corporation; author Matt Fleming
+ *
+ * Support for invoking 32-bit EFI runtime services from a 64-bit
+ * kernel.
+ *
+ * The below thunking functions are only used after ExitBootServices()
+ * has been called. This simplifies things considerably as compared with
+ * the early EFI thunking because we can leave all the kernel state
+ * intact (GDT, IDT, etc.) and simply invoke the 32-bit EFI runtime
+ * services from __KERNEL32_CS. This means we can continue to service
+ * interrupts across an EFI mixed mode call.
+ *
+ * We do, however, need to handle the fact that we're running in a full
+ * 64-bit virtual address space. Things like the stack and instruction
+ * addresses need to be accessible by the 32-bit firmware, so we rely on
+ * using the identity mappings in the EFI page table to access the stack
+ * and kernel text (see efi_setup_page_tables()).
  */
 
 #include <linux/linkage.h>
 #include <asm/page_types.h>
+#include <asm/segment.h>
 
        .text
        .code64
@@ -33,14 +50,6 @@ ENTRY(efi64_thunk)
        leaq    efi_exit32(%rip), %rbx
        subq    %rax, %rbx
        movl    %ebx, 8(%rsp)
-       leaq    efi_gdt64(%rip), %rbx
-       subq    %rax, %rbx
-       movl    %ebx, 2(%ebx)
-       movl    %ebx, 4(%rsp)
-       leaq    efi_gdt32(%rip), %rbx
-       subq    %rax, %rbx
-       movl    %ebx, 2(%ebx)
-       movl    %ebx, (%rsp)
 
        leaq    __efi64_thunk(%rip), %rbx
        subq    %rax, %rbx
@@ -52,14 +61,92 @@ ENTRY(efi64_thunk)
        retq
 ENDPROC(efi64_thunk)
 
-       .data
-efi_gdt32:
-       .word   efi_gdt32_end - efi_gdt32
-       .long   0                       /* Filled out above */
-       .word   0
-       .quad   0x0000000000000000      /* NULL descriptor */
-       .quad   0x00cf9a000000ffff      /* __KERNEL_CS */
-       .quad   0x00cf93000000ffff      /* __KERNEL_DS */
-efi_gdt32_end:
+/*
+ * We run this function from the 1:1 mapping.
+ *
+ * This function must be invoked with a 1:1 mapped stack.
+ */
+ENTRY(__efi64_thunk)
+       movl    %ds, %eax
+       push    %rax
+       movl    %es, %eax
+       push    %rax
+       movl    %ss, %eax
+       push    %rax
+
+       subq    $32, %rsp
+       movl    %esi, 0x0(%rsp)
+       movl    %edx, 0x4(%rsp)
+       movl    %ecx, 0x8(%rsp)
+       movq    %r8, %rsi
+       movl    %esi, 0xc(%rsp)
+       movq    %r9, %rsi
+       movl    %esi,  0x10(%rsp)
+
+       leaq    1f(%rip), %rbx
+       movq    %rbx, func_rt_ptr(%rip)
+
+       /* Switch to 32-bit descriptor */
+       pushq   $__KERNEL32_CS
+       leaq    efi_enter32(%rip), %rax
+       pushq   %rax
+       lretq
+
+1:     addq    $32, %rsp
+
+       pop     %rbx
+       movl    %ebx, %ss
+       pop     %rbx
+       movl    %ebx, %es
+       pop     %rbx
+       movl    %ebx, %ds
 
+       /*
+        * Convert 32-bit status code into 64-bit.
+        */
+       test    %rax, %rax
+       jz      1f
+       movl    %eax, %ecx
+       andl    $0x0fffffff, %ecx
+       andl    $0xf0000000, %eax
+       shl     $32, %rax
+       or      %rcx, %rax
+1:
+       ret
+ENDPROC(__efi64_thunk)
+
+ENTRY(efi_exit32)
+       movq    func_rt_ptr(%rip), %rax
+       push    %rax
+       mov     %rdi, %rax
+       ret
+ENDPROC(efi_exit32)
+
+       .code32
+/*
+ * EFI service pointer must be in %edi.
+ *
+ * The stack should represent the 32-bit calling convention.
+ */
+ENTRY(efi_enter32)
+       movl    $__KERNEL_DS, %eax
+       movl    %eax, %ds
+       movl    %eax, %es
+       movl    %eax, %ss
+
+       call    *%edi
+
+       /* We must preserve return value */
+       movl    %eax, %edi
+
+       movl    72(%esp), %eax
+       pushl   $__KERNEL_CS
+       pushl   %eax
+
+       lret
+ENDPROC(efi_enter32)
+
+       .data
+       .balign 8
+func_rt_ptr:           .quad 0
 efi_saved_sp:          .quad 0
diff --git a/arch/x86/platform/intel-quark/Makefile b/arch/x86/platform/intel-quark/Makefile
new file mode 100644 (file)
index 0000000..9cc57ed
--- /dev/null
@@ -0,0 +1,2 @@
+obj-$(CONFIG_INTEL_IMR) += imr.o
+obj-$(CONFIG_DEBUG_IMR_SELFTEST) += imr_selftest.o
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
new file mode 100644 (file)
index 0000000..0ee619f
--- /dev/null
@@ -0,0 +1,661 @@
+/**
+ * imr.c
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * IMR registers define an isolated region of memory that can
+ * be masked to prohibit certain system agents from accessing memory.
+ * When a device behind a masked port performs an access - snooped or
+ * not, an IMR may optionally prevent that transaction from changing
+ * the state of memory or from getting correct data in response to the
+ * operation.
+ *
+ * Write data will be dropped and reads will return 0xFFFFFFFF; the
+ * system will reset and the system BIOS will print an error message to
+ * inform the user that an IMR has been violated.
+ *
+ * This code is based on the Linux MTRR code and reference code from
+ * Intel's Quark BSP EFI, Linux and grub code.
+ *
+ * See quark-x1000-datasheet.pdf for register definitions.
+ * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm-generic/sections.h>
+#include <asm/cpu_device_id.h>
+#include <asm/imr.h>
+#include <asm/iosf_mbi.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+struct imr_device {
+       struct dentry   *file;
+       bool            init;
+       struct mutex    lock;
+       int             max_imr;
+       int             reg_base;
+};
+
+static struct imr_device imr_dev;
+
+/*
+ * IMR read/write mask control registers.
+ * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for
+ * bit definitions.
+ *
+ * addr_lo
+ * 31          Lock bit
+ * 30:24       Reserved
+ * 23:2                1 KiB aligned lo address
+ * 1:0         Reserved
+ *
+ * addr_hi
+ * 31:24       Reserved
+ * 23:2                1 KiB aligned hi address
+ * 1:0         Reserved
+ */
+#define IMR_LOCK       BIT(31)
+
+struct imr_regs {
+       u32 addr_lo;
+       u32 addr_hi;
+       u32 rmask;
+       u32 wmask;
+};
+
+#define IMR_NUM_REGS   (sizeof(struct imr_regs)/sizeof(u32))
+#define IMR_SHIFT      8
+#define imr_to_phys(x) ((x) << IMR_SHIFT)
+#define phys_to_imr(x) ((x) >> IMR_SHIFT)
+
+/**
+ * imr_is_enabled - true if an IMR is enabled, false otherwise.
+ *
+ * Determines if an IMR is enabled based on address range and read/write
+ * mask. An IMR with its address range set to zero and its read/write
+ * access masks set to all is considered to be disabled. An IMR in any
+ * other state - for example an address range of zero but without the
+ * access-all masks - is considered to be enabled. This definition of
+ * disabled is how firmware switches off an IMR and is maintained in
+ * the kernel for consistency.
+ *
+ * @imr:       pointer to IMR descriptor.
+ * @return:    true if the IMR is enabled, false if disabled.
+ */
+static inline int imr_is_enabled(struct imr_regs *imr)
+{
+       return !(imr->rmask == IMR_READ_ACCESS_ALL &&
+                imr->wmask == IMR_WRITE_ACCESS_ALL &&
+                imr_to_phys(imr->addr_lo) == 0 &&
+                imr_to_phys(imr->addr_hi) == 0);
+}
+
+/**
+ * imr_read - read an IMR at a given index.
+ *
+ * Requires caller to hold imr mutex.
+ *
+ * @idev:      pointer to imr_device structure.
+ * @imr_id:    IMR entry to read.
+ * @imr:       IMR structure representing address and access masks.
+ * @return:    0 on success or error code passed from mbi_iosf on failure.
+ */
+static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
+{
+       u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
+       int ret;
+
+       ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+                               reg++, &imr->addr_lo);
+       if (ret)
+               return ret;
+
+       ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+                               reg++, &imr->addr_hi);
+       if (ret)
+               return ret;
+
+       ret = iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+                               reg++, &imr->rmask);
+       if (ret)
+               return ret;
+
+       return iosf_mbi_read(QRK_MBI_UNIT_MM, QRK_MBI_MM_READ,
+                               reg++, &imr->wmask);
+}
+
+/**
+ * imr_write - write an IMR at a given index.
+ *
+ * Requires caller to hold imr mutex.
+ * Note lock bits need to be written independently of address bits.
+ *
+ * @idev:      pointer to imr_device structure.
+ * @imr_id:    IMR entry to write.
+ * @imr:       IMR structure representing address and access masks.
+ * @lock:      indicates if the IMR lock bit should be applied.
+ * @return:    0 on success or error code passed from mbi_iosf on failure.
+ */
+static int imr_write(struct imr_device *idev, u32 imr_id,
+                    struct imr_regs *imr, bool lock)
+{
+       unsigned long flags;
+       u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
+       int ret;
+
+       local_irq_save(flags);
+
+       ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE, reg++,
+                               imr->addr_lo);
+       if (ret)
+               goto failed;
+
+       ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+                               reg++, imr->addr_hi);
+       if (ret)
+               goto failed;
+
+       ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+                               reg++, imr->rmask);
+       if (ret)
+               goto failed;
+
+       ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+                               reg++, imr->wmask);
+       if (ret)
+               goto failed;
+
+       /* Lock bit must be set separately from the addr_lo address bits. */
+       if (lock) {
+               imr->addr_lo |= IMR_LOCK;
+               ret = iosf_mbi_write(QRK_MBI_UNIT_MM, QRK_MBI_MM_WRITE,
+                                       reg - IMR_NUM_REGS, imr->addr_lo);
+               if (ret)
+                       goto failed;
+       }
+
+       local_irq_restore(flags);
+       return 0;
+failed:
+       /*
+        * If writing to the IOSF failed then we're in an unknown state,
+        * likely a very bad state. An IMR in an invalid state will almost
+        * certainly lead to a memory access violation.
+        */
+       local_irq_restore(flags);
+       WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n",
+            imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK);
+
+       return ret;
+}
+
+/**
+ * imr_dbgfs_state_show - print state of IMR registers.
+ *
+ * @s:         pointer to seq_file for output.
+ * @unused:    unused parameter.
+ * @return:    0 on success or error code passed from iosf_mbi on failure.
+ */
+static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
+{
+       phys_addr_t base;
+       phys_addr_t end;
+       int i;
+       struct imr_device *idev = s->private;
+       struct imr_regs imr;
+       size_t size;
+       int ret = -ENODEV;
+
+       mutex_lock(&idev->lock);
+
+       for (i = 0; i < idev->max_imr; i++) {
+
+               ret = imr_read(idev, i, &imr);
+               if (ret)
+                       break;
+
+               /*
+                * Remember to add IMR_ALIGN bytes to the size to account for
+                * the inherent IMR_ALIGN bytes contained in the masked-away
+                * lower ten bits.
+                */
+               if (imr_is_enabled(&imr)) {
+                       base = imr_to_phys(imr.addr_lo);
+                       end = imr_to_phys(imr.addr_hi) + IMR_MASK;
+               } else {
+                       base = 0;
+                       end = 0;
+               }
+               size = end - base;
+               seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
+                          "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
+                          &base, &end, size, imr.rmask, imr.wmask,
+                          imr_is_enabled(&imr) ? "enabled " : "disabled",
+                          imr.addr_lo & IMR_LOCK ? "locked" : "unlocked");
+       }
+
+       mutex_unlock(&idev->lock);
+       return ret;
+}
+
+/**
+ * imr_state_open - debugfs open callback.
+ *
+ * @inode:     pointer to struct inode.
+ * @file:      pointer to struct file.
+ * @return:    result of single_open().
+ */
+static int imr_state_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, imr_dbgfs_state_show, inode->i_private);
+}
+
+static const struct file_operations imr_state_ops = {
+       .open           = imr_state_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+/**
+ * imr_debugfs_register - register debugfs hooks.
+ *
+ * @idev:      pointer to imr_device structure.
+ * @return:    0 on success - errno on failure.
+ */
+static int imr_debugfs_register(struct imr_device *idev)
+{
+       idev->file = debugfs_create_file("imr_state", S_IFREG | S_IRUGO, NULL,
+                                        idev, &imr_state_ops);
+       return PTR_ERR_OR_ZERO(idev->file);
+}
+
+/**
+ * imr_debugfs_unregister - unregister debugfs hooks.
+ *
+ * @idev:      pointer to imr_device structure.
+ * @return:
+ */
+static void imr_debugfs_unregister(struct imr_device *idev)
+{
+       debugfs_remove(idev->file);
+}
+
+/**
+ * imr_check_params - check a passed address range for IMR alignment and non-zero size
+ *
+ * @base:      base address of intended IMR.
+ * @size:      size of intended IMR.
+ * @return:    zero on a valid range, -EINVAL on an unaligned or zero-sized range.
+ */
+static int imr_check_params(phys_addr_t base, size_t size)
+{
+       if ((base & IMR_MASK) || (size & IMR_MASK)) {
+               pr_err("base %pa size 0x%08zx must align to 1KiB\n",
+                       &base, size);
+               return -EINVAL;
+       }
+       if (size == 0)
+               return -EINVAL;
+
+       return 0;
+}
+
+/**
+ * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends.
+ *
+ * IMR addr_hi has a built-in offset of IMR_ALIGN (0x400) bytes from the
+ * value in the register. We need to subtract IMR_ALIGN bytes from input sizes
+ * as a result.
+ *
+ * @size:      input size bytes.
+ * @return:    reduced size.
+ */
+static inline size_t imr_raw_size(size_t size)
+{
+       return size - IMR_ALIGN;
+}
+
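A worked example of this adjustment (editorial sketch, assuming a hypothetical 4 KiB request), consistent with the comment above:

/*
 * For a requested size of 4 KiB:
 *   raw_size = 4096 - IMR_ALIGN = 3072
 *   end      = base + 3072
 * addr_hi is programmed with phys_to_imr(base + 3072); the register's
 * implicit +IMR_ALIGN offset then restores the full 4 KiB extent.
 */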
+/**
+ * imr_address_overlap - detects an address overlap.
+ *
+ * @addr:      address to check against an existing IMR.
+ * @imr:       imr being checked.
+ * @return:    true for overlap, false for no overlap.
+ */
+static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr)
+{
+       return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi);
+}
+
+/**
+ * imr_add_range - add an Isolated Memory Region.
+ *
+ * @base:      physical base address of region, aligned to 1 KiB.
+ * @size:      physical size of region in bytes, must be aligned to 1 KiB.
+ * @rmask:     read access mask.
+ * @wmask:     write access mask.
+ * @lock:      indicates whether or not to permanently lock this region.
+ * @return:    zero on success or negative value indicating error.
+ */
+int imr_add_range(phys_addr_t base, size_t size,
+                 unsigned int rmask, unsigned int wmask, bool lock)
+{
+       phys_addr_t end;
+       unsigned int i;
+       struct imr_device *idev = &imr_dev;
+       struct imr_regs imr;
+       size_t raw_size;
+       int reg;
+       int ret;
+
+       if (WARN_ONCE(idev->init == false, "driver not initialized"))
+               return -ENODEV;
+
+       ret = imr_check_params(base, size);
+       if (ret)
+               return ret;
+
+       /* Tweak the size value. */
+       raw_size = imr_raw_size(size);
+       end = base + raw_size;
+
+       /*
+        * Check for reserved IMR value common to firmware, kernel and grub
+        * indicating a disabled IMR.
+        */
+       imr.addr_lo = phys_to_imr(base);
+       imr.addr_hi = phys_to_imr(end);
+       imr.rmask = rmask;
+       imr.wmask = wmask;
+       if (!imr_is_enabled(&imr))
+               return -ENOTSUPP;
+
+       mutex_lock(&idev->lock);
+
+       /*
+        * Find a free IMR while checking for an existing overlapping range.
+        * Note there's no restriction in silicon to prevent IMR overlaps.
+        * For the sake of simplicity and ease in defining/debugging an IMR
+        * memory map we exclude IMR overlaps.
+        */
+       reg = -1;
+       for (i = 0; i < idev->max_imr; i++) {
+               ret = imr_read(idev, i, &imr);
+               if (ret)
+                       goto failed;
+
+               /* Find overlap @ base or end of requested range. */
+               ret = -EINVAL;
+               if (imr_is_enabled(&imr)) {
+                       if (imr_address_overlap(base, &imr))
+                               goto failed;
+                       if (imr_address_overlap(end, &imr))
+                               goto failed;
+               } else {
+                       reg = i;
+               }
+       }
+
+       /* Error out if we have no free IMR entries. */
+       if (reg == -1) {
+               ret = -ENOMEM;
+               goto failed;
+       }
+
+       pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n",
+                reg, &base, &end, raw_size, rmask, wmask);
+
+       /* Enable IMR at specified range and access mask. */
+       imr.addr_lo = phys_to_imr(base);
+       imr.addr_hi = phys_to_imr(end);
+       imr.rmask = rmask;
+       imr.wmask = wmask;
+
+       ret = imr_write(idev, reg, &imr, lock);
+       if (ret < 0) {
+               /*
+                * In the highly unlikely event iosf_mbi_write failed
+                * attempt to rollback the IMR setup skipping the trapping
+                * of further IOSF write failures.
+                */
+               imr.addr_lo = 0;
+               imr.addr_hi = 0;
+               imr.rmask = IMR_READ_ACCESS_ALL;
+               imr.wmask = IMR_WRITE_ACCESS_ALL;
+               imr_write(idev, reg, &imr, false);
+       }
+failed:
+       mutex_unlock(&idev->lock);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(imr_add_range);
+
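A minimal usage sketch of the exported API (not part of this patch); the buffer, its 8 KiB size and the use of the IMR_CPU masks seen elsewhere in this patch are hypothetical:

#include <asm/imr.h>

/* Hypothetical caller: keep non-CPU agents away from an 8 KiB, 1 KiB-aligned buffer. */
static int example_protect_buffer(phys_addr_t buf_phys)
{
	int ret;

	/* CPU-only read/write access, not permanently locked. */
	ret = imr_add_range(buf_phys, 8 * 1024, IMR_CPU, IMR_CPU, false);
	if (ret)
		return ret;

	/* ... use the buffer ... */

	return imr_remove_range(buf_phys, 8 * 1024);
}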
+/**
+ * __imr_remove_range - delete an Isolated Memory Region.
+ *
+ * This function allows you to delete an IMR either by index, specified by
+ * reg, or by the address range specified by base and size. If an index is
+ * specified, the base and size parameters are ignored.
+ * __imr_remove_range(0, base, size); delete IMR at index 0, base/size ignored.
+ * __imr_remove_range(-1, base, size); delete IMR from base to base+size.
+ *
+ * @reg:       imr index to remove.
+ * @base:      physical base address of region aligned to 1 KiB.
+ * @size:      physical size of region in bytes aligned to 1 KiB.
+ * @return:    -EINVAL on invalid range or out of range id
+ *             -ENODEV if reg is valid but no IMR exists or is locked
+ *             0 on success.
+ */
+static int __imr_remove_range(int reg, phys_addr_t base, size_t size)
+{
+       phys_addr_t end;
+       bool found = false;
+       unsigned int i;
+       struct imr_device *idev = &imr_dev;
+       struct imr_regs imr;
+       size_t raw_size;
+       int ret = 0;
+
+       if (WARN_ONCE(idev->init == false, "driver not initialized"))
+               return -ENODEV;
+
+       /*
+        * Validate address range if deleting by address, else we are
+        * deleting by index where base and size will be ignored.
+        */
+       if (reg == -1) {
+               ret = imr_check_params(base, size);
+               if (ret)
+                       return ret;
+       }
+
+       /* Tweak the size value. */
+       raw_size = imr_raw_size(size);
+       end = base + raw_size;
+
+       mutex_lock(&idev->lock);
+
+       if (reg >= 0) {
+               /* If a specific IMR is given try to use it. */
+               ret = imr_read(idev, reg, &imr);
+               if (ret)
+                       goto failed;
+
+               if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) {
+                       ret = -ENODEV;
+                       goto failed;
+               }
+               found = true;
+       } else {
+               /* Search for match based on address range. */
+               for (i = 0; i < idev->max_imr; i++) {
+                       ret = imr_read(idev, i, &imr);
+                       if (ret)
+                               goto failed;
+
+                       if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK)
+                               continue;
+
+                       if ((imr_to_phys(imr.addr_lo) == base) &&
+                           (imr_to_phys(imr.addr_hi) == end)) {
+                               found = true;
+                               reg = i;
+                               break;
+                       }
+               }
+       }
+
+       if (!found) {
+               ret = -ENODEV;
+               goto failed;
+       }
+
+       pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size);
+
+       /* Tear down the IMR. */
+       imr.addr_lo = 0;
+       imr.addr_hi = 0;
+       imr.rmask = IMR_READ_ACCESS_ALL;
+       imr.wmask = IMR_WRITE_ACCESS_ALL;
+
+       ret = imr_write(idev, reg, &imr, false);
+
+failed:
+       mutex_unlock(&idev->lock);
+       return ret;
+}
+
+/**
+ * imr_remove_range - delete an Isolated Memory Region by address
+ *
+ * This function allows you to delete an IMR by an address range specified
+ * by base and size respectively.
+ * imr_remove_range(base, size); delete IMR from base to base+size.
+ *
+ * @base:      physical base address of region aligned to 1 KiB.
+ * @size:      physical size of region in bytes aligned to 1 KiB.
+ * @return:    -EINVAL on invalid range or out of range id
+ *             -ENODEV if reg is valid but no IMR exists or is locked
+ *             0 on success.
+ */
+int imr_remove_range(phys_addr_t base, size_t size)
+{
+       return __imr_remove_range(-1, base, size);
+}
+EXPORT_SYMBOL_GPL(imr_remove_range);
+
+/**
+ * imr_clear - delete an Isolated Memory Region by index
+ *
+ * This function allows you to delete an IMR by its index. Useful for
+ * initial sanitization of the IMR address map.
+ * imr_clear(i); delete the IMR at index i.
+ *
+ * @reg:       imr index to remove.
+ * @return:    -EINVAL on invalid range or out of range id
+ *             -ENODEV if reg is valid but no IMR exists or is locked
+ *             0 on success.
+ */
+static inline int imr_clear(int reg)
+{
+       return __imr_remove_range(reg, 0, 0);
+}
+
+/**
+ * imr_fixup_memmap - Tear down IMRs used during bootup.
+ *
+ * BIOS and Grub both set up IMRs around the compressed kernel and initrd
+ * memory that need to be removed before the kernel hands out one of the
+ * IMR encased addresses to a downstream DMA agent such as the SD or
+ * Ethernet controller. IMRs on Galileo are set up to immediately reset the
+ * system on violation. As a result, if you're running a root filesystem
+ * from SD you'll need the boot-time IMRs torn down or you'll find
+ * seemingly random resets when using your filesystem.
+ *
+ * @idev:      pointer to imr_device structure.
+ * @return:
+ */
+static void __init imr_fixup_memmap(struct imr_device *idev)
+{
+       phys_addr_t base = virt_to_phys(&_text);
+       size_t size = virt_to_phys(&__end_rodata) - base;
+       int i;
+       int ret;
+
+       /* Tear down all existing unlocked IMRs. */
+       for (i = 0; i < idev->max_imr; i++)
+               imr_clear(i);
+
+       /*
+        * Setup a locked IMR around the physical extent of the kernel
+        * from the beginning of the .text section to the end of the
+        * .rodata section as one physically contiguous block.
+        */
+       ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
+       if (ret < 0) {
+               pr_err("unable to setup IMR for kernel: (%p - %p)\n",
+                       &_text, &__end_rodata);
+       } else {
+               pr_info("protecting kernel .text - .rodata: %zu KiB (%p - %p)\n",
+                       size / 1024, &_text, &__end_rodata);
+       }
+}
+
+static const struct x86_cpu_id imr_ids[] __initconst = {
+       { X86_VENDOR_INTEL, 5, 9 },     /* Intel Quark SoC X1000. */
+       {}
+};
+MODULE_DEVICE_TABLE(x86cpu, imr_ids);
+
+/**
+ * imr_init - entry point for IMR driver.
+ *
+ * return: -ENODEV if there is no IMR support, 0 if good to go.
+ */
+static int __init imr_init(void)
+{
+       struct imr_device *idev = &imr_dev;
+       int ret;
+
+       if (!x86_match_cpu(imr_ids) || !iosf_mbi_available())
+               return -ENODEV;
+
+       idev->max_imr = QUARK_X1000_IMR_MAX;
+       idev->reg_base = QUARK_X1000_IMR_REGBASE;
+       idev->init = true;
+
+       mutex_init(&idev->lock);
+       ret = imr_debugfs_register(idev);
+       if (ret != 0)
+               pr_warn("debugfs register failed!\n");
+       imr_fixup_memmap(idev);
+       return 0;
+}
+
+/**
+ * imr_exit - exit point for IMR code.
+ *
+ * Deregisters debugfs, leaves IMR state as-is.
+ *
+ * return:
+ */
+static void __exit imr_exit(void)
+{
+       imr_debugfs_unregister(&imr_dev);
+}
+
+module_init(imr_init);
+module_exit(imr_exit);
+
+MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
+MODULE_DESCRIPTION("Intel Isolated Memory Region driver");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
new file mode 100644 (file)
index 0000000..c9a0838
--- /dev/null
@@ -0,0 +1,129 @@
+/**
+ * imr_selftest.c
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * IMR self test. The purpose of this module is to run a set of tests on the
+ * IMR API to validate its sanity. We check for overlapping and reserved
+ * addresses and for setup/teardown sanity.
+ *
+ */
+
+#include <asm-generic/sections.h>
+#include <asm/imr.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/types.h>
+
+#define SELFTEST KBUILD_MODNAME ": "
+/**
+ * imr_self_test_result - Print result string for self test.
+ *
+ * @res:       result code - true if the test passed, false otherwise.
+ * @fmt:       format string.
+ * ...         variadic argument list.
+ */
+static void __init imr_self_test_result(int res, const char *fmt, ...)
+{
+       va_list vlist;
+
+       /* Print pass/fail. */
+       if (res)
+               pr_info(SELFTEST "pass ");
+       else
+               pr_info(SELFTEST "fail ");
+
+       /* Print variable string. */
+       va_start(vlist, fmt);
+       vprintk(fmt, vlist);
+       va_end(vlist);
+
+       /* Optional warning. */
+       WARN(res == 0, "test failed");
+}
+#undef SELFTEST
+
+/**
+ * imr_self_test
+ *
+ * Run some simple tests to verify overlap handling, zero-sized
+ * allocations and 1 KiB sized areas.
+ *
+ */
+static void __init imr_self_test(void)
+{
+       phys_addr_t base  = virt_to_phys(&_text);
+       size_t size = virt_to_phys(&__end_rodata) - base;
+       const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
+       int ret;
+
+       /* Test zero zero. */
+       ret = imr_add_range(0, 0, 0, 0, false);
+       imr_self_test_result(ret < 0, "zero sized IMR\n");
+
+       /* Test exact overlap. */
+       ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+       imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+       /* Test overlap with base inside of existing. */
+       base += size - IMR_ALIGN;
+       ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+       imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+       /* Test overlap with end inside of existing. */
+       base -= size + IMR_ALIGN * 2;
+       ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+       imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+       /* Test that a 1 KiB IMR @ zero with read/write all will bomb out. */
+       ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL,
+                           IMR_WRITE_ACCESS_ALL, false);
+       imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n");
+
+       /* Test that a 1 KiB IMR @ zero with CPU only will work. */
+       ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU, false);
+       imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n");
+       if (ret >= 0) {
+               ret = imr_remove_range(0, IMR_ALIGN);
+               imr_self_test_result(ret == 0, "teardown - cpu-access\n");
+       }
+
+       /* Test 2 KiB works. */
+       size = IMR_ALIGN * 2;
+       ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL,
+                           IMR_WRITE_ACCESS_ALL, false);
+       imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n");
+       if (ret >= 0) {
+               ret = imr_remove_range(0, size);
+               imr_self_test_result(ret == 0, "teardown 2KiB\n");
+       }
+}
+
+/**
+ * imr_self_test_init - entry point for the IMR self test.
+ *
+ * return: 0 on completion.
+ */
+static int __init imr_self_test_init(void)
+{
+       imr_self_test();
+       return 0;
+}
+
+/**
+ * imr_self_test_exit - exit point for the IMR self test.
+ *
+ * return:
+ */
+static void __exit imr_self_test_exit(void)
+{
+}
+
+module_init(imr_self_test_init);
+module_exit(imr_self_test_exit);
+
+MODULE_AUTHOR("Bryan O'Donoghue <pure.logic@nexus-software.ie>");
+MODULE_DESCRIPTION("Intel Isolated Memory Region self-test driver");
+MODULE_LICENSE("Dual BSD/GPL");
index 23b45eb9a89ce4d56f8a73e181d004b107dc1f81..956374c1edbc31e4c1eb50c3fb29cb8828ad44b5 100644 (file)
@@ -41,7 +41,7 @@ static u8 zero_stats;
 static inline void check_zero(void)
 {
        u8 ret;
-       u8 old = ACCESS_ONCE(zero_stats);
+       u8 old = READ_ONCE(zero_stats);
        if (unlikely(old)) {
                ret = cmpxchg(&zero_stats, old, 0);
                /* This ensures only one fellow resets the stat */
@@ -112,6 +112,7 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
        struct xen_lock_waiting *w = this_cpu_ptr(&lock_waiting);
        int cpu = smp_processor_id();
        u64 start;
+       __ticket_t head;
        unsigned long flags;
 
        /* If kicker interrupts not initialized yet, just spin */
@@ -159,11 +160,15 @@ __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
         */
        __ticket_enter_slowpath(lock);
 
+       /* make sure enter_slowpath, which is atomic, does not cross the read */
+       smp_mb__after_atomic();
+
        /*
         * check again make sure it didn't become free while
         * we weren't looking
         */
-       if (ACCESS_ONCE(lock->tickets.head) == want) {
+       head = READ_ONCE(lock->tickets.head);
+       if (__tickets_equal(head, want)) {
                add_stats(TAKEN_SLOW_PICKUP, 1);
                goto out;
        }
@@ -204,8 +209,8 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
                const struct xen_lock_waiting *w = &per_cpu(lock_waiting, cpu);
 
                /* Make sure we read lock before want */
-               if (ACCESS_ONCE(w->lock) == lock &&
-                   ACCESS_ONCE(w->want) == next) {
+               if (READ_ONCE(w->lock) == lock &&
+                   READ_ONCE(w->want) == next) {
                        add_stats(RELEASED_SLOW_KICKED, 1);
                        xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
                        break;
index 9273d0969ebd6377a02cf0be189d34ea619afe3e..5b9c6d5c3636ad6412c7b898c44e4709b35597d1 100644 (file)
@@ -1292,6 +1292,9 @@ static u64 tg_prfill_cpu_rwstat(struct seq_file *sf,
        struct blkg_rwstat rwstat = { }, tmp;
        int i, cpu;
 
+       if (tg->stats_cpu == NULL)
+               return 0;
+
        for_each_possible_cpu(cpu) {
                struct tg_stats_cpu *sc = per_cpu_ptr(tg->stats_cpu, cpu);
 
index b18cd2151ddb244e1961c48165ab973d88880d71..623b117ad1a23ee09ac7aca1ca5d028b51ec2738 100644 (file)
@@ -55,6 +55,7 @@ acpi-$(CONFIG_ACPI_PROCFS_POWER) += cm_sbs.o
 ifdef CONFIG_ACPI_VIDEO
 acpi-y                         += video_detect.o
 endif
+acpi-y                         += acpi_lpat.o
 
 # These are (potentially) separate modules
 
diff --git a/drivers/acpi/acpi_lpat.c b/drivers/acpi/acpi_lpat.c
new file mode 100644 (file)
index 0000000..feb61c1
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * acpi_lpat.c - LPAT table processing functions
+ *
+ * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <acpi/acpi_lpat.h>
+
+/**
+ * acpi_lpat_raw_to_temp(): Return temperature from raw value through
+ * LPAT conversion table
+ *
+ * @lpat_table: the temperature_raw mapping table structure
+ * @raw: the raw value, used as a key to get the temperature from the
+ *       above mapping table
+ *
+ * A positive converted temperature value will be returned on success,
+ * a negative errno will be returned in error cases.
+ */
+int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
+                         int raw)
+{
+       int i, delta_temp, delta_raw, temp;
+       struct acpi_lpat *lpat = lpat_table->lpat;
+
+       for (i = 0; i < lpat_table->lpat_count - 1; i++) {
+               if ((raw >= lpat[i].raw && raw <= lpat[i+1].raw) ||
+                   (raw <= lpat[i].raw && raw >= lpat[i+1].raw))
+                       break;
+       }
+
+       if (i == lpat_table->lpat_count - 1)
+               return -ENOENT;
+
+       delta_temp = lpat[i+1].temp - lpat[i].temp;
+       delta_raw = lpat[i+1].raw - lpat[i].raw;
+       temp = lpat[i].temp + (raw - lpat[i].raw) * delta_temp / delta_raw;
+
+       return temp;
+}
+EXPORT_SYMBOL_GPL(acpi_lpat_raw_to_temp);
+
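A worked example of the linear interpolation above, assuming two hypothetical neighbouring table entries (temp 20, raw 977) and (temp 30, raw 941):

/*
 * raw = 959 falls between the two entries, so:
 *   delta_temp = 30 - 20   =  10
 *   delta_raw  = 941 - 977 = -36
 *   temp = 20 + (959 - 977) * 10 / -36 = 20 + 5 = 25
 */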
+/**
+ * acpi_lpat_temp_to_raw(): Return raw value from temperature through
+ * LPAT conversion table
+ *
+ * @lpat_table: the temperature_raw mapping table structure
+ * @temp: the temperature, used as a key to get the raw value from the
+ *        above mapping table
+ *
+ * A positive converted temperature value will be returned on success,
+ * a negative errno will be returned in error cases.
+ */
+int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
+                         int temp)
+{
+       int i, delta_temp, delta_raw, raw;
+       struct acpi_lpat *lpat = lpat_table->lpat;
+
+       for (i = 0; i < lpat_table->lpat_count - 1; i++) {
+               if (temp >= lpat[i].temp && temp <= lpat[i+1].temp)
+                       break;
+       }
+
+       if (i ==  lpat_table->lpat_count - 1)
+               return -ENOENT;
+
+       delta_temp = lpat[i+1].temp - lpat[i].temp;
+       delta_raw = lpat[i+1].raw - lpat[i].raw;
+       raw = lpat[i].raw + (temp - lpat[i].temp) * delta_raw / delta_temp;
+
+       return raw;
+}
+EXPORT_SYMBOL_GPL(acpi_lpat_temp_to_raw);
+
+/**
+ * acpi_lpat_get_conversion_table(): Parse ACPI LPAT table if present.
+ *
+ * @handle: Handle to acpi device
+ *
+ * Parse the LPAT table into a struct acpi_lpat_conversion_table. On success
+ * it returns a pointer to a newly allocated table. This table must
+ * be freed by the caller when finished processing, using a call to
+ * acpi_lpat_free_conversion_table.
+ */
+struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(acpi_handle
+                                                                 handle)
+{
+       struct acpi_lpat_conversion_table *lpat_table = NULL;
+       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *obj_p, *obj_e;
+       int *lpat, i;
+       acpi_status status;
+
+       status = acpi_evaluate_object(handle, "LPAT", NULL, &buffer);
+       if (ACPI_FAILURE(status))
+               return NULL;
+
+       obj_p = (union acpi_object *)buffer.pointer;
+       if (!obj_p || (obj_p->type != ACPI_TYPE_PACKAGE) ||
+           (obj_p->package.count % 2) || (obj_p->package.count < 4))
+               goto out;
+
+       lpat = kcalloc(obj_p->package.count, sizeof(int), GFP_KERNEL);
+       if (!lpat)
+               goto out;
+
+       for (i = 0; i < obj_p->package.count; i++) {
+               obj_e = &obj_p->package.elements[i];
+               if (obj_e->type != ACPI_TYPE_INTEGER) {
+                       kfree(lpat);
+                       goto out;
+               }
+               lpat[i] = (s64)obj_e->integer.value;
+       }
+
+       lpat_table = kzalloc(sizeof(*lpat_table), GFP_KERNEL);
+       if (!lpat_table) {
+               kfree(lpat);
+               goto out;
+       }
+
+       lpat_table->lpat = (struct acpi_lpat *)lpat;
+       lpat_table->lpat_count = obj_p->package.count / 2;
+
+out:
+       kfree(buffer.pointer);
+       return lpat_table;
+}
+EXPORT_SYMBOL_GPL(acpi_lpat_get_conversion_table);
+
+/**
+ * acpi_lpat_free_conversion_table(): Free LPAT table.
+ *
+ * @lpat_table: the temperature_raw mapping table structure
+ *
+ * Frees the LPAT table previously allocated by a call to
+ * acpi_lpat_get_conversion_table.
+ */
+void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
+                                    *lpat_table)
+{
+       if (lpat_table) {
+               kfree(lpat_table->lpat);
+               kfree(lpat_table);
+       }
+}
+EXPORT_SYMBOL_GPL(acpi_lpat_free_conversion_table);
+
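A minimal sketch of the intended get/convert/free lifecycle (hypothetical caller, not part of this patch):

#include <linux/acpi.h>
#include <acpi/acpi_lpat.h>

/* Hypothetical helper: convert one raw thermal reading for a device. */
static int example_read_temp(acpi_handle handle, int raw)
{
	struct acpi_lpat_conversion_table *table;
	int temp;

	table = acpi_lpat_get_conversion_table(handle);
	if (!table)
		return raw;	/* no LPAT table: pass the raw value through */

	temp = acpi_lpat_raw_to_temp(table, raw);
	acpi_lpat_free_conversion_table(table);
	return temp;
}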
+MODULE_LICENSE("GPL");
index 02e835f3cf8aa76326b9994768b75f2d39c39fed..657964e8ab7ed2ba9e56004d5f83881ad826e7b7 100644 (file)
@@ -105,7 +105,7 @@ static void lpss_uart_setup(struct lpss_private_data *pdata)
        }
 }
 
-static void byt_i2c_setup(struct lpss_private_data *pdata)
+static void lpss_deassert_reset(struct lpss_private_data *pdata)
 {
        unsigned int offset;
        u32 val;
@@ -114,9 +114,18 @@ static void byt_i2c_setup(struct lpss_private_data *pdata)
        val = readl(pdata->mmio_base + offset);
        val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
        writel(val, pdata->mmio_base + offset);
+}
+
+#define LPSS_I2C_ENABLE                        0x6c
+
+static void byt_i2c_setup(struct lpss_private_data *pdata)
+{
+       lpss_deassert_reset(pdata);
 
        if (readl(pdata->mmio_base + pdata->dev_desc->prv_offset))
                pdata->fixed_clk_rate = 133000000;
+
+       writel(0, pdata->mmio_base + LPSS_I2C_ENABLE);
 }
 
 static struct lpss_device_desc lpt_dev_desc = {
@@ -125,7 +134,7 @@ static struct lpss_device_desc lpt_dev_desc = {
 };
 
 static struct lpss_device_desc lpt_i2c_dev_desc = {
-       .flags = LPSS_CLK | LPSS_LTR,
+       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_LTR,
        .prv_offset = 0x800,
 };
 
@@ -166,6 +175,12 @@ static struct lpss_device_desc byt_i2c_dev_desc = {
        .setup = byt_i2c_setup,
 };
 
+static struct lpss_device_desc bsw_spi_dev_desc = {
+       .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX,
+       .prv_offset = 0x400,
+       .setup = lpss_deassert_reset,
+};
+
 #else
 
 #define LPSS_ADDR(desc) (0UL)
@@ -198,7 +213,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
        /* Braswell LPSS devices */
        { "80862288", LPSS_ADDR(byt_pwm_dev_desc) },
        { "8086228A", LPSS_ADDR(byt_uart_dev_desc) },
-       { "8086228E", LPSS_ADDR(byt_spi_dev_desc) },
+       { "8086228E", LPSS_ADDR(bsw_spi_dev_desc) },
        { "808622C1", LPSS_ADDR(byt_i2c_dev_desc) },
 
        { "INT3430", LPSS_ADDR(lpt_dev_desc) },
index 982b67faaaf32c0de360aa35449a699075a6550a..a8dd2f7633822b05f5fdeeaffe5a00496f4710be 100644 (file)
@@ -680,7 +680,7 @@ static void acpi_ec_start(struct acpi_ec *ec, bool resuming)
                /* Enable GPE for event processing (SCI_EVT=1) */
                if (!resuming)
                        acpi_ec_submit_request(ec);
-               pr_info("+++++ EC started +++++\n");
+               pr_debug("EC started\n");
        }
        spin_unlock_irqrestore(&ec->lock, flags);
 }
@@ -712,7 +712,7 @@ static void acpi_ec_stop(struct acpi_ec *ec, bool suspending)
                        acpi_ec_complete_request(ec);
                clear_bit(EC_FLAGS_STARTED, &ec->flags);
                clear_bit(EC_FLAGS_STOPPED, &ec->flags);
-               pr_info("+++++ EC stopped +++++\n");
+               pr_debug("EC stopped\n");
        }
        spin_unlock_irqrestore(&ec->lock, flags);
 }
index a732e5d7e322937ae66e537664fa02e677fda2bc..bd772cd5649466943ccb2e97ca57abff5e8238fe 100644 (file)
 #include <linux/module.h>
 #include <linux/acpi.h>
 #include <linux/regmap.h>
+#include <acpi/acpi_lpat.h>
 #include "intel_pmic.h"
 
 #define PMIC_POWER_OPREGION_ID         0x8d
 #define PMIC_THERMAL_OPREGION_ID       0x8c
 
-struct acpi_lpat {
-       int temp;
-       int raw;
-};
-
 struct intel_pmic_opregion {
        struct mutex lock;
-       struct acpi_lpat *lpat;
-       int lpat_count;
+       struct acpi_lpat_conversion_table *lpat_table;
        struct regmap *regmap;
        struct intel_pmic_opregion_data *data;
 };
@@ -50,105 +45,6 @@ static int pmic_get_reg_bit(int address, struct pmic_table *table,
        return -ENOENT;
 }
 
-/**
- * raw_to_temp(): Return temperature from raw value through LPAT table
- *
- * @lpat: the temperature_raw mapping table
- * @count: the count of the above mapping table
- * @raw: the raw value, used as a key to get the temerature from the
- *       above mapping table
- *
- * A positive value will be returned on success, a negative errno will
- * be returned in error cases.
- */
-static int raw_to_temp(struct acpi_lpat *lpat, int count, int raw)
-{
-       int i, delta_temp, delta_raw, temp;
-
-       for (i = 0; i < count - 1; i++) {
-               if ((raw >= lpat[i].raw && raw <= lpat[i+1].raw) ||
-                   (raw <= lpat[i].raw && raw >= lpat[i+1].raw))
-                       break;
-       }
-
-       if (i == count - 1)
-               return -ENOENT;
-
-       delta_temp = lpat[i+1].temp - lpat[i].temp;
-       delta_raw = lpat[i+1].raw - lpat[i].raw;
-       temp = lpat[i].temp + (raw - lpat[i].raw) * delta_temp / delta_raw;
-
-       return temp;
-}
-
-/**
- * temp_to_raw(): Return raw value from temperature through LPAT table
- *
- * @lpat: the temperature_raw mapping table
- * @count: the count of the above mapping table
- * @temp: the temperature, used as a key to get the raw value from the
- *        above mapping table
- *
- * A positive value will be returned on success, a negative errno will
- * be returned in error cases.
- */
-static int temp_to_raw(struct acpi_lpat *lpat, int count, int temp)
-{
-       int i, delta_temp, delta_raw, raw;
-
-       for (i = 0; i < count - 1; i++) {
-               if (temp >= lpat[i].temp && temp <= lpat[i+1].temp)
-                       break;
-       }
-
-       if (i == count - 1)
-               return -ENOENT;
-
-       delta_temp = lpat[i+1].temp - lpat[i].temp;
-       delta_raw = lpat[i+1].raw - lpat[i].raw;
-       raw = lpat[i].raw + (temp - lpat[i].temp) * delta_raw / delta_temp;
-
-       return raw;
-}
-
-static void pmic_thermal_lpat(struct intel_pmic_opregion *opregion,
-                             acpi_handle handle, struct device *dev)
-{
-       struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-       union acpi_object *obj_p, *obj_e;
-       int *lpat, i;
-       acpi_status status;
-
-       status = acpi_evaluate_object(handle, "LPAT", NULL, &buffer);
-       if (ACPI_FAILURE(status))
-               return;
-
-       obj_p = (union acpi_object *)buffer.pointer;
-       if (!obj_p || (obj_p->type != ACPI_TYPE_PACKAGE) ||
-           (obj_p->package.count % 2) || (obj_p->package.count < 4))
-               goto out;
-
-       lpat = devm_kmalloc(dev, sizeof(int) * obj_p->package.count,
-                           GFP_KERNEL);
-       if (!lpat)
-               goto out;
-
-       for (i = 0; i < obj_p->package.count; i++) {
-               obj_e = &obj_p->package.elements[i];
-               if (obj_e->type != ACPI_TYPE_INTEGER) {
-                       devm_kfree(dev, lpat);
-                       goto out;
-               }
-               lpat[i] = (s64)obj_e->integer.value;
-       }
-
-       opregion->lpat = (struct acpi_lpat *)lpat;
-       opregion->lpat_count = obj_p->package.count / 2;
-
-out:
-       kfree(buffer.pointer);
-}
-
 static acpi_status intel_pmic_power_handler(u32 function,
                acpi_physical_address address, u32 bits, u64 *value64,
                void *handler_context, void *region_context)
@@ -192,12 +88,12 @@ static int pmic_read_temp(struct intel_pmic_opregion *opregion,
        if (raw_temp < 0)
                return raw_temp;
 
-       if (!opregion->lpat) {
+       if (!opregion->lpat_table) {
                *value = raw_temp;
                return 0;
        }
 
-       temp = raw_to_temp(opregion->lpat, opregion->lpat_count, raw_temp);
+       temp = acpi_lpat_raw_to_temp(opregion->lpat_table, raw_temp);
        if (temp < 0)
                return temp;
 
@@ -223,9 +119,8 @@ static int pmic_thermal_aux(struct intel_pmic_opregion *opregion, int reg,
        if (!opregion->data->update_aux)
                return -ENXIO;
 
-       if (opregion->lpat) {
-               raw_temp = temp_to_raw(opregion->lpat, opregion->lpat_count,
-                                      *value);
+       if (opregion->lpat_table) {
+               raw_temp = acpi_lpat_temp_to_raw(opregion->lpat_table, *value);
                if (raw_temp < 0)
                        return raw_temp;
        } else {
@@ -314,6 +209,7 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
 {
        acpi_status status;
        struct intel_pmic_opregion *opregion;
+       int ret;
 
        if (!dev || !regmap || !d)
                return -EINVAL;
@@ -327,14 +223,16 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
 
        mutex_init(&opregion->lock);
        opregion->regmap = regmap;
-       pmic_thermal_lpat(opregion, handle, dev);
+       opregion->lpat_table = acpi_lpat_get_conversion_table(handle);
 
        status = acpi_install_address_space_handler(handle,
                                                    PMIC_POWER_OPREGION_ID,
                                                    intel_pmic_power_handler,
                                                    NULL, opregion);
-       if (ACPI_FAILURE(status))
-               return -ENODEV;
+       if (ACPI_FAILURE(status)) {
+               ret = -ENODEV;
+               goto out_error;
+       }
 
        status = acpi_install_address_space_handler(handle,
                                                    PMIC_THERMAL_OPREGION_ID,
@@ -343,11 +241,16 @@ int intel_pmic_install_opregion_handler(struct device *dev, acpi_handle handle,
        if (ACPI_FAILURE(status)) {
                acpi_remove_address_space_handler(handle, PMIC_POWER_OPREGION_ID,
                                                  intel_pmic_power_handler);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto out_error;
        }
 
        opregion->data = d;
        return 0;
+
+out_error:
+       acpi_lpat_free_conversion_table(opregion->lpat_table);
+       return ret;
 }
 EXPORT_SYMBOL_GPL(intel_pmic_install_opregion_handler);
 
index 4752b99399870efd068a1f1a6c656b1ebe333349..c723668e3e277def6f8d6309fe1af21b989951fb 100644 (file)
@@ -46,7 +46,7 @@ static bool acpi_dev_resource_len_valid(u64 start, u64 end, u64 len, bool io)
        if (len && reslen && reslen == len && start <= end)
                return true;
 
-       pr_info("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
+       pr_debug("ACPI: invalid or unassigned resource %s [%016llx - %016llx] length [%016llx]\n",
                io ? "io" : "mem", start, end, len);
 
        return false;
index 88a4f99dd2a7ccc117924a73b47ca0d5a93c8df5..debd30917010a17697102bc84d1e468c69c94d17 100644 (file)
@@ -540,6 +540,15 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "730U3E/740U3E"),
                },
        },
+       {
+        /* https://bugs.freedesktop.org/show_bug.cgi?id=87286 */
+        .callback = video_disable_native_backlight,
+        .ident = "SAMSUNG 900X3C/900X3D/900X3E/900X4C/900X4D",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "900X3C/900X3D/900X3E/900X4C/900X4D"),
+               },
+       },
 
        {
         /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
index cbdfbbf983927e85a4a83d94d20047f2fadf6357..b64bccbb78c9a5223548f7b7e25e33c4eb51afd4 100644 (file)
 #include <linux/ptrace.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/t10-pi.h>
 #include <linux/types.h>
 #include <scsi/sg.h>
 #include <asm-generic/io-64-nonatomic-lo-hi.h>
 
+#define NVME_MINORS            (1U << MINORBITS)
 #define NVME_Q_DEPTH           1024
 #define NVME_AQ_DEPTH          64
 #define SQ_SIZE(depth)         (depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
 #define ADMIN_TIMEOUT          (admin_timeout * HZ)
 #define SHUTDOWN_TIMEOUT       (shutdown_timeout * HZ)
-#define IOD_TIMEOUT            (retry_time * HZ)
 
 static unsigned char admin_timeout = 60;
 module_param(admin_timeout, byte, 0644);
@@ -57,10 +58,6 @@ unsigned char nvme_io_timeout = 30;
 module_param_named(io_timeout, nvme_io_timeout, byte, 0644);
 MODULE_PARM_DESC(io_timeout, "timeout in seconds for I/O");
 
-static unsigned char retry_time = 30;
-module_param(retry_time, byte, 0644);
-MODULE_PARM_DESC(retry_time, "time in seconds to retry failed I/O");
-
 static unsigned char shutdown_timeout = 5;
 module_param(shutdown_timeout, byte, 0644);
 MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown");
@@ -68,6 +65,9 @@ MODULE_PARM_DESC(shutdown_timeout, "timeout in seconds for controller shutdown")
 static int nvme_major;
 module_param(nvme_major, int, 0);
 
+static int nvme_char_major;
+module_param(nvme_char_major, int, 0);
+
 static int use_threaded_interrupts;
 module_param(use_threaded_interrupts, int, 0);
 
@@ -76,7 +76,8 @@ static LIST_HEAD(dev_list);
 static struct task_struct *nvme_thread;
 static struct workqueue_struct *nvme_workq;
 static wait_queue_head_t nvme_kthread_wait;
-static struct notifier_block nvme_nb;
+
+static struct class *nvme_class;
 
 static void nvme_reset_failed_dev(struct work_struct *ws);
 static int nvme_process_cq(struct nvme_queue *nvmeq);
@@ -95,7 +96,6 @@ struct async_cmd_info {
  * commands and one for I/O commands).
  */
 struct nvme_queue {
-       struct llist_node node;
        struct device *q_dmadev;
        struct nvme_dev *dev;
        char irqname[24];       /* nvme4294967295-65535\0 */
@@ -482,6 +482,62 @@ static int nvme_error_status(u16 status)
        }
 }
 
+static void nvme_dif_prep(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+       if (be32_to_cpu(pi->ref_tag) == v)
+               pi->ref_tag = cpu_to_be32(p);
+}
+
+static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
+{
+       if (be32_to_cpu(pi->ref_tag) == p)
+               pi->ref_tag = cpu_to_be32(v);
+}
+
+/**
+ * nvme_dif_remap - remaps ref tags to bip seed and physical lba
+ *
+ * The virtual start sector is the one that was originally submitted by the
+ * block layer.        Due to partitioning, MD/DM cloning, etc. the actual physical
+ * start sector may be different. Remap protection information to match the
+ * physical LBA on writes, and back to the original seed on reads.
+ *
+ * Type 0 and 3 do not have a ref tag, so no remapping required.
+ */
+static void nvme_dif_remap(struct request *req,
+                       void (*dif_swap)(u32 p, u32 v, struct t10_pi_tuple *pi))
+{
+       struct nvme_ns *ns = req->rq_disk->private_data;
+       struct bio_integrity_payload *bip;
+       struct t10_pi_tuple *pi;
+       void *p, *pmap;
+       u32 i, nlb, ts, phys, virt;
+
+       if (!ns->pi_type || ns->pi_type == NVME_NS_DPS_PI_TYPE3)
+               return;
+
+       bip = bio_integrity(req->bio);
+       if (!bip)
+               return;
+
+       pmap = kmap_atomic(bip->bip_vec->bv_page) + bip->bip_vec->bv_offset;
+       if (!pmap)
+               return;
+
+       p = pmap;
+       virt = bip_get_seed(bip);
+       phys = nvme_block_nr(ns, blk_rq_pos(req));
+       nlb = (blk_rq_bytes(req) >> ns->lba_shift);
+       ts = ns->disk->integrity->tuple_size;
+
+       for (i = 0; i < nlb; i++, virt++, phys++) {
+               pi = (struct t10_pi_tuple *)p;
+               dif_swap(phys, virt, pi);
+               p += ts;
+       }
+       kunmap_atomic(pmap);
+}
+
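A worked example of the remapping (editorial sketch; the seed and LBA values are hypothetical), matching the prep/complete callbacks above:

/*
 * Assume a bio with integrity seed (virtual sector) 8 that lands at
 * physical LBA 2056:
 *
 * On submission (write path, dif_swap == nvme_dif_prep):
 *   block 0: ref_tag 8    -> 2056,  block 1: ref_tag 9    -> 2057, ...
 * On completion (read path, dif_swap == nvme_dif_complete):
 *   block 0: ref_tag 2056 -> 8,     block 1: ref_tag 2057 -> 9, ...
 *
 * Tuples whose ref_tag does not match are left untouched.
 */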
 static void req_completion(struct nvme_queue *nvmeq, void *ctx,
                                                struct nvme_completion *cqe)
 {
@@ -512,9 +568,16 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
                        "completing aborted command with status:%04x\n",
                        status);
 
-       if (iod->nents)
+       if (iod->nents) {
                dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->sg, iod->nents,
                        rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+               if (blk_integrity_rq(req)) {
+                       if (!rq_data_dir(req))
+                               nvme_dif_remap(req, nvme_dif_complete);
+                       dma_unmap_sg(&nvmeq->dev->pci_dev->dev, iod->meta_sg, 1,
+                               rq_data_dir(req) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+               }
+       }
        nvme_free_iod(nvmeq->dev, iod);
 
        blk_mq_complete_request(req);
@@ -670,6 +733,24 @@ static int nvme_submit_iod(struct nvme_queue *nvmeq, struct nvme_iod *iod,
        cmnd->rw.prp2 = cpu_to_le64(iod->first_dma);
        cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
        cmnd->rw.length = cpu_to_le16((blk_rq_bytes(req) >> ns->lba_shift) - 1);
+
+       if (blk_integrity_rq(req)) {
+               cmnd->rw.metadata = cpu_to_le64(sg_dma_address(iod->meta_sg));
+               switch (ns->pi_type) {
+               case NVME_NS_DPS_PI_TYPE3:
+                       control |= NVME_RW_PRINFO_PRCHK_GUARD;
+                       break;
+               case NVME_NS_DPS_PI_TYPE1:
+               case NVME_NS_DPS_PI_TYPE2:
+                       control |= NVME_RW_PRINFO_PRCHK_GUARD |
+                                       NVME_RW_PRINFO_PRCHK_REF;
+                       cmnd->rw.reftag = cpu_to_le32(
+                                       nvme_block_nr(ns, blk_rq_pos(req)));
+                       break;
+               }
+       } else if (ns->ms)
+               control |= NVME_RW_PRINFO_PRACT;
+
        cmnd->rw.control = cpu_to_le16(control);
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 
@@ -690,6 +771,19 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
        struct nvme_iod *iod;
        enum dma_data_direction dma_dir;
 
+       /*
+        * If formatted with metadata, require the block layer to provide a
+        * buffer unless this namespace is formatted such that the metadata can be
+        * stripped/generated by the controller with PRACT=1.
+        */
+       if (ns->ms && !blk_integrity_rq(req)) {
+               if (!(ns->pi_type && ns->ms == 8)) {
+                       req->errors = -EFAULT;
+                       blk_mq_complete_request(req);
+                       return BLK_MQ_RQ_QUEUE_OK;
+               }
+       }
+
        iod = nvme_alloc_iod(req, ns->dev, GFP_ATOMIC);
        if (!iod)
                return BLK_MQ_RQ_QUEUE_BUSY;
@@ -725,6 +819,21 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                                        iod->nents, dma_dir);
                        goto retry_cmd;
                }
+               if (blk_integrity_rq(req)) {
+                       if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+                               goto error_cmd;
+
+                       sg_init_table(iod->meta_sg, 1);
+                       if (blk_rq_map_integrity_sg(
+                                       req->q, req->bio, iod->meta_sg) != 1)
+                               goto error_cmd;
+
+                       if (rq_data_dir(req))
+                               nvme_dif_remap(req, nvme_dif_prep);
+
+                       if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+                               goto error_cmd;
+               }
        }
 
        nvme_set_info(cmd, iod, req_completion);
@@ -817,14 +926,6 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
        return IRQ_WAKE_THREAD;
 }
 
-static void nvme_abort_cmd_info(struct nvme_queue *nvmeq, struct nvme_cmd_info *
-                                                               cmd_info)
-{
-       spin_lock_irq(&nvmeq->q_lock);
-       cancel_cmd_info(cmd_info, NULL);
-       spin_unlock_irq(&nvmeq->q_lock);
-}
-
 struct sync_cmd_info {
        struct task_struct *task;
        u32 result;
@@ -847,7 +948,6 @@ static void sync_completion(struct nvme_queue *nvmeq, void *ctx,
 static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
                                                u32 *result, unsigned timeout)
 {
-       int ret;
        struct sync_cmd_info cmdinfo;
        struct nvme_cmd_info *cmd_rq = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = cmd_rq->nvmeq;
@@ -859,29 +959,12 @@ static int nvme_submit_sync_cmd(struct request *req, struct nvme_command *cmd,
 
        nvme_set_info(cmd_rq, &cmdinfo, sync_completion);
 
-       set_current_state(TASK_KILLABLE);
-       ret = nvme_submit_cmd(nvmeq, cmd);
-       if (ret) {
-               nvme_finish_cmd(nvmeq, req->tag, NULL);
-               set_current_state(TASK_RUNNING);
-       }
-       ret = schedule_timeout(timeout);
-
-       /*
-        * Ensure that sync_completion has either run, or that it will
-        * never run.
-        */
-       nvme_abort_cmd_info(nvmeq, blk_mq_rq_to_pdu(req));
-
-       /*
-        * We never got the completion
-        */
-       if (cmdinfo.status == -EINTR)
-               return -EINTR;
+       set_current_state(TASK_UNINTERRUPTIBLE);
+       nvme_submit_cmd(nvmeq, cmd);
+       schedule();
 
        if (result)
                *result = cmdinfo.result;
-
        return cmdinfo.status;
 }
 
@@ -1158,29 +1241,18 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
        struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
        struct nvme_queue *nvmeq = cmd->nvmeq;
 
-       /*
-        * The aborted req will be completed on receiving the abort req.
-        * We enable the timer again. If hit twice, it'll cause a device reset,
-        * as the device then is in a faulty state.
-        */
-       int ret = BLK_EH_RESET_TIMER;
-
        dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
                                                        nvmeq->qid);
-
        spin_lock_irq(&nvmeq->q_lock);
-       if (!nvmeq->dev->initialized) {
-               /*
-                * Force cancelled command frees the request, which requires we
-                * return BLK_EH_NOT_HANDLED.
-                */
-               nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
-               ret = BLK_EH_NOT_HANDLED;
-       } else
-               nvme_abort_req(req);
+       nvme_abort_req(req);
        spin_unlock_irq(&nvmeq->q_lock);
 
-       return ret;
+       /*
+        * The aborted req will be completed on receiving the abort req.
+        * We enable the timer again. If hit twice, it'll cause a device reset,
+        * as the device then is in a faulty state.
+        */
+       return BLK_EH_RESET_TIMER;
 }
 
 static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1233,7 +1305,6 @@ static void nvme_clear_queue(struct nvme_queue *nvmeq)
        struct blk_mq_hw_ctx *hctx = nvmeq->hctx;
 
        spin_lock_irq(&nvmeq->q_lock);
-       nvme_process_cq(nvmeq);
        if (hctx && hctx->tags)
                blk_mq_tag_busy_iter(hctx, nvme_cancel_queue_ios, nvmeq);
        spin_unlock_irq(&nvmeq->q_lock);
@@ -1256,7 +1327,10 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
        }
        if (!qid && dev->admin_q)
                blk_mq_freeze_queue_start(dev->admin_q);
-       nvme_clear_queue(nvmeq);
+
+       spin_lock_irq(&nvmeq->q_lock);
+       nvme_process_cq(nvmeq);
+       spin_unlock_irq(&nvmeq->q_lock);
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
@@ -1875,13 +1949,61 @@ static int nvme_getgeo(struct block_device *bd, struct hd_geometry *geo)
        return 0;
 }
 
+static void nvme_config_discard(struct nvme_ns *ns)
+{
+       u32 logical_block_size = queue_logical_block_size(ns->queue);
+       ns->queue->limits.discard_zeroes_data = 0;
+       ns->queue->limits.discard_alignment = logical_block_size;
+       ns->queue->limits.discard_granularity = logical_block_size;
+       ns->queue->limits.max_discard_sectors = 0xffffffff;
+       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
+}
+
+static int nvme_noop_verify(struct blk_integrity_iter *iter)
+{
+       return 0;
+}
+
+static int nvme_noop_generate(struct blk_integrity_iter *iter)
+{
+       return 0;
+}
+
+struct blk_integrity nvme_meta_noop = {
+       .name                   = "NVME_META_NOOP",
+       .generate_fn            = nvme_noop_generate,
+       .verify_fn              = nvme_noop_verify,
+};
+
+static void nvme_init_integrity(struct nvme_ns *ns)
+{
+       struct blk_integrity integrity;
+
+       switch (ns->pi_type) {
+       case NVME_NS_DPS_PI_TYPE3:
+               integrity = t10_pi_type3_crc;
+               break;
+       case NVME_NS_DPS_PI_TYPE1:
+       case NVME_NS_DPS_PI_TYPE2:
+               integrity = t10_pi_type1_crc;
+               break;
+       default:
+               integrity = nvme_meta_noop;
+               break;
+       }
+       integrity.tuple_size = ns->ms;
+       blk_integrity_register(ns->disk, &integrity);
+       blk_queue_max_integrity_segments(ns->queue, 1);
+}
+
 static int nvme_revalidate_disk(struct gendisk *disk)
 {
        struct nvme_ns *ns = disk->private_data;
        struct nvme_dev *dev = ns->dev;
        struct nvme_id_ns *id;
        dma_addr_t dma_addr;
-       int lbaf;
+       int lbaf, pi_type, old_ms;
+       unsigned short bs;
 
        id = dma_alloc_coherent(&dev->pci_dev->dev, 4096, &dma_addr,
                                                                GFP_KERNEL);
@@ -1890,16 +2012,50 @@ static int nvme_revalidate_disk(struct gendisk *disk)
                                                                __func__);
                return 0;
        }
+       if (nvme_identify(dev, ns->ns_id, 0, dma_addr)) {
+               dev_warn(&dev->pci_dev->dev,
+                       "identify failed ns:%d, setting capacity to 0\n",
+                       ns->ns_id);
+               memset(id, 0, sizeof(*id));
+       }
 
-       if (nvme_identify(dev, ns->ns_id, 0, dma_addr))
-               goto free;
-
-       lbaf = id->flbas & 0xf;
+       old_ms = ns->ms;
+       lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK;
        ns->lba_shift = id->lbaf[lbaf].ds;
+       ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+
+       /*
+        * If identify namespace failed, use default 512 byte block size so
+        * block layer can use before failing read/write for 0 capacity.
+        */
+       if (ns->lba_shift == 0)
+               ns->lba_shift = 9;
+       bs = 1 << ns->lba_shift;
+
+       /* XXX: PI implementation requires metadata size equal to the t10 pi tuple size */
+       pi_type = ns->ms == sizeof(struct t10_pi_tuple) ?
+                                       id->dps & NVME_NS_DPS_PI_MASK : 0;
+
+       if (disk->integrity && (ns->pi_type != pi_type || ns->ms != old_ms ||
+                               bs != queue_logical_block_size(disk->queue) ||
+                               (ns->ms && id->flbas & NVME_NS_FLBAS_META_EXT)))
+               blk_integrity_unregister(disk);
+
+       ns->pi_type = pi_type;
+       blk_queue_logical_block_size(ns->queue, bs);
+
+       if (ns->ms && !disk->integrity && (disk->flags & GENHD_FL_UP) &&
+                               !(id->flbas & NVME_NS_FLBAS_META_EXT))
+               nvme_init_integrity(ns);
+
+       if (id->ncap == 0 || (ns->ms && !disk->integrity))
+               set_capacity(disk, 0);
+       else
+               set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+
+       if (dev->oncs & NVME_CTRL_ONCS_DSM)
+               nvme_config_discard(ns);
 
-       blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
-       set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
- free:
        dma_free_coherent(&dev->pci_dev->dev, 4096, id, dma_addr);
        return 0;
 }
@@ -1923,8 +2079,7 @@ static int nvme_kthread(void *data)
                spin_lock(&dev_list_lock);
                list_for_each_entry_safe(dev, next, &dev_list, node) {
                        int i;
-                       if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
-                                                       dev->initialized) {
+                       if (readl(&dev->bar->csts) & NVME_CSTS_CFS) {
                                if (work_busy(&dev->reset_work))
                                        continue;
                                list_del_init(&dev->node);
@@ -1956,30 +2111,16 @@ static int nvme_kthread(void *data)
        return 0;
 }
 
-static void nvme_config_discard(struct nvme_ns *ns)
-{
-       u32 logical_block_size = queue_logical_block_size(ns->queue);
-       ns->queue->limits.discard_zeroes_data = 0;
-       ns->queue->limits.discard_alignment = logical_block_size;
-       ns->queue->limits.discard_granularity = logical_block_size;
-       ns->queue->limits.max_discard_sectors = 0xffffffff;
-       queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
-}
-
-static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
-                       struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
+static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
 {
        struct nvme_ns *ns;
        struct gendisk *disk;
        int node = dev_to_node(&dev->pci_dev->dev);
-       int lbaf;
-
-       if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
-               return NULL;
 
        ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
        if (!ns)
-               return NULL;
+               return;
+
        ns->queue = blk_mq_init_queue(&dev->tagset);
        if (IS_ERR(ns->queue))
                goto out_free_ns;
@@ -1995,9 +2136,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
 
        ns->ns_id = nsid;
        ns->disk = disk;
-       lbaf = id->flbas & 0xf;
-       ns->lba_shift = id->lbaf[lbaf].ds;
-       ns->ms = le16_to_cpu(id->lbaf[lbaf].ms);
+       ns->lba_shift = 9; /* default to 512 byte blocks until the disk is revalidated */
+       list_add_tail(&ns->list, &dev->namespaces);
+
        blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
        if (dev->max_hw_sectors)
                blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
@@ -2011,21 +2152,26 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
        disk->fops = &nvme_fops;
        disk->private_data = ns;
        disk->queue = ns->queue;
-       disk->driverfs_dev = &dev->pci_dev->dev;
+       disk->driverfs_dev = dev->device;
        disk->flags = GENHD_FL_EXT_DEVT;
        sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
-       set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
-
-       if (dev->oncs & NVME_CTRL_ONCS_DSM)
-               nvme_config_discard(ns);
-
-       return ns;
 
+       /*
+        * Initialize capacity to 0 until we establish the namespace format and
+        * set up integrity extensions if necessary. The revalidate_disk call
+        * after add_disk allows the driver to register an integrity profile if
+        * the format requires one.
+        */
+       set_capacity(disk, 0);
+       nvme_revalidate_disk(ns->disk);
+       add_disk(ns->disk);
+       if (ns->ms)
+               revalidate_disk(ns->disk);
+       return;
  out_free_queue:
        blk_cleanup_queue(ns->queue);
  out_free_ns:
        kfree(ns);
-       return NULL;
 }
 
 static void nvme_create_io_queues(struct nvme_dev *dev)
@@ -2150,22 +2296,20 @@ static int nvme_dev_add(struct nvme_dev *dev)
        struct pci_dev *pdev = dev->pci_dev;
        int res;
        unsigned nn, i;
-       struct nvme_ns *ns;
        struct nvme_id_ctrl *ctrl;
-       struct nvme_id_ns *id_ns;
        void *mem;
        dma_addr_t dma_addr;
        int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
 
-       mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
+       mem = dma_alloc_coherent(&pdev->dev, 4096, &dma_addr, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
 
        res = nvme_identify(dev, 0, 1, dma_addr);
        if (res) {
                dev_err(&pdev->dev, "Identify Controller failed (%d)\n", res);
-               res = -EIO;
-               goto out;
+               dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
+               return -EIO;
        }
 
        ctrl = mem;
@@ -2191,6 +2335,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
                } else
                        dev->max_hw_sectors = max_hw_sectors;
        }
+       dma_free_coherent(&dev->pci_dev->dev, 4096, mem, dma_addr);
 
        dev->tagset.ops = &nvme_mq_ops;
        dev->tagset.nr_hw_queues = dev->online_queues - 1;
@@ -2203,33 +2348,12 @@ static int nvme_dev_add(struct nvme_dev *dev)
        dev->tagset.driver_data = dev;
 
        if (blk_mq_alloc_tag_set(&dev->tagset))
-               goto out;
-
-       id_ns = mem;
-       for (i = 1; i <= nn; i++) {
-               res = nvme_identify(dev, i, 0, dma_addr);
-               if (res)
-                       continue;
-
-               if (id_ns->ncap == 0)
-                       continue;
-
-               res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
-                                                       dma_addr + 4096, NULL);
-               if (res)
-                       memset(mem + 4096, 0, 4096);
+               return 0;
 
-               ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
-               if (ns)
-                       list_add_tail(&ns->list, &dev->namespaces);
-       }
-       list_for_each_entry(ns, &dev->namespaces, list)
-               add_disk(ns->disk);
-       res = 0;
+       for (i = 1; i <= nn; i++)
+               nvme_alloc_ns(dev, i);
 
- out:
-       dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
-       return res;
+       return 0;
 }
 
 static int nvme_dev_map(struct nvme_dev *dev)
@@ -2358,8 +2482,6 @@ static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
 static void nvme_del_queue_end(struct nvme_queue *nvmeq)
 {
        struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
-
-       nvme_clear_queue(nvmeq);
        nvme_put_dq(dq);
 }
 
@@ -2502,7 +2624,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
        int i;
        u32 csts = -1;
 
-       dev->initialized = 0;
        nvme_dev_list_remove(dev);
 
        if (dev->bar) {
@@ -2513,7 +2634,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
                for (i = dev->queue_count - 1; i >= 0; i--) {
                        struct nvme_queue *nvmeq = dev->queues[i];
                        nvme_suspend_queue(nvmeq);
-                       nvme_clear_queue(nvmeq);
                }
        } else {
                nvme_disable_io_queues(dev);
@@ -2521,6 +2641,9 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
                nvme_disable_queue(dev, 0);
        }
        nvme_dev_unmap(dev);
+
+       for (i = dev->queue_count - 1; i >= 0; i--)
+               nvme_clear_queue(dev->queues[i]);
 }
 
 static void nvme_dev_remove(struct nvme_dev *dev)
@@ -2528,8 +2651,11 @@ static void nvme_dev_remove(struct nvme_dev *dev)
        struct nvme_ns *ns;
 
        list_for_each_entry(ns, &dev->namespaces, list) {
-               if (ns->disk->flags & GENHD_FL_UP)
+               if (ns->disk->flags & GENHD_FL_UP) {
+                       if (ns->disk->integrity)
+                               blk_integrity_unregister(ns->disk);
                        del_gendisk(ns->disk);
+               }
                if (!blk_queue_dying(ns->queue)) {
                        blk_mq_abort_requeue_list(ns->queue);
                        blk_cleanup_queue(ns->queue);
@@ -2611,6 +2737,7 @@ static void nvme_free_dev(struct kref *kref)
        struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 
        pci_dev_put(dev->pci_dev);
+       put_device(dev->device);
        nvme_free_namespaces(dev);
        nvme_release_instance(dev);
        blk_mq_free_tag_set(&dev->tagset);
@@ -2622,11 +2749,27 @@ static void nvme_free_dev(struct kref *kref)
 
 static int nvme_dev_open(struct inode *inode, struct file *f)
 {
-       struct nvme_dev *dev = container_of(f->private_data, struct nvme_dev,
-                                                               miscdev);
-       kref_get(&dev->kref);
-       f->private_data = dev;
-       return 0;
+       struct nvme_dev *dev;
+       int instance = iminor(inode);
+       int ret = -ENODEV;
+
+       spin_lock(&dev_list_lock);
+       list_for_each_entry(dev, &dev_list, node) {
+               if (dev->instance == instance) {
+                       if (!dev->admin_q) {
+                               ret = -EWOULDBLOCK;
+                               break;
+                       }
+                       if (!kref_get_unless_zero(&dev->kref))
+                               break;
+                       f->private_data = dev;
+                       ret = 0;
+                       break;
+               }
+       }
+       spin_unlock(&dev_list_lock);
+
+       return ret;
 }
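
The open path above resolves the char-dev minor to a controller under dev_list_lock and only takes a reference via kref_get_unless_zero(), so a device whose last reference is already gone cannot be resurrected, and an instance without an admin queue is reported as -EWOULDBLOCK. A generic sketch of the same lookup pattern, using hypothetical names (my_dev, my_devs, my_lock) rather than the driver's:

    #include <linux/kref.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_dev {
            int id;
            struct kref kref;
            struct list_head node;
    };

    static LIST_HEAD(my_devs);
    static DEFINE_SPINLOCK(my_lock);

    /* Return a referenced device, or NULL if absent or already dying. */
    static struct my_dev *my_dev_find_get(int id)
    {
            struct my_dev *dev, *found = NULL;

            spin_lock(&my_lock);
            list_for_each_entry(dev, &my_devs, node) {
                    if (dev->id != id)
                            continue;
                    if (kref_get_unless_zero(&dev->kref))  /* fails after last put */
                            found = dev;
                    break;
            }
            spin_unlock(&my_lock);
            return found;
    }
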
 
 static int nvme_dev_release(struct inode *inode, struct file *f)
@@ -2768,7 +2911,6 @@ static int nvme_dev_resume(struct nvme_dev *dev)
                nvme_unfreeze_queues(dev);
                nvme_set_irq_hints(dev);
        }
-       dev->initialized = 1;
        return 0;
 }
 
@@ -2799,6 +2941,7 @@ static void nvme_reset_workfn(struct work_struct *work)
        dev->reset_workfn(work);
 }
 
+static void nvme_async_probe(struct work_struct *work);
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        int node, result = -ENOMEM;
@@ -2834,37 +2977,20 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto release;
 
        kref_init(&dev->kref);
-       result = nvme_dev_start(dev);
-       if (result)
+       dev->device = device_create(nvme_class, &pdev->dev,
+                               MKDEV(nvme_char_major, dev->instance),
+                               dev, "nvme%d", dev->instance);
+       if (IS_ERR(dev->device)) {
+               result = PTR_ERR(dev->device);
                goto release_pools;
+       }
+       get_device(dev->device);
 
-       if (dev->online_queues > 1)
-               result = nvme_dev_add(dev);
-       if (result)
-               goto shutdown;
-
-       scnprintf(dev->name, sizeof(dev->name), "nvme%d", dev->instance);
-       dev->miscdev.minor = MISC_DYNAMIC_MINOR;
-       dev->miscdev.parent = &pdev->dev;
-       dev->miscdev.name = dev->name;
-       dev->miscdev.fops = &nvme_dev_fops;
-       result = misc_register(&dev->miscdev);
-       if (result)
-               goto remove;
-
-       nvme_set_irq_hints(dev);
-
-       dev->initialized = 1;
+       INIT_WORK(&dev->probe_work, nvme_async_probe);
+       schedule_work(&dev->probe_work);
        return 0;
 
- remove:
-       nvme_dev_remove(dev);
-       nvme_dev_remove_admin(dev);
-       nvme_free_namespaces(dev);
- shutdown:
-       nvme_dev_shutdown(dev);
  release_pools:
-       nvme_free_queues(dev, 0);
        nvme_release_prp_pools(dev);
  release:
        nvme_release_instance(dev);
@@ -2877,6 +3003,29 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return result;
 }
 
+static void nvme_async_probe(struct work_struct *work)
+{
+       struct nvme_dev *dev = container_of(work, struct nvme_dev, probe_work);
+       int result;
+
+       result = nvme_dev_start(dev);
+       if (result)
+               goto reset;
+
+       if (dev->online_queues > 1)
+               result = nvme_dev_add(dev);
+       if (result)
+               goto reset;
+
+       nvme_set_irq_hints(dev);
+       return;
+ reset:
+       if (!work_busy(&dev->reset_work)) {
+               dev->reset_workfn = nvme_reset_failed_dev;
+               queue_work(nvme_workq, &dev->reset_work);
+       }
+}
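
With this split, PCI probe only allocates resources, creates the management device, and schedules the slow controller bring-up onto a work item; failures during bring-up are routed to the existing reset path instead of unwinding probe. In general terms the deferred-probe shape looks like the sketch below (hypothetical names, not the driver's); the matching teardown must flush the work item first, which is what the flush_work(&dev->probe_work) call added to nvme_remove() further down does.

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct my_ctrl {
            struct work_struct probe_work;
    };

    static void my_async_probe(struct work_struct *work)
    {
            struct my_ctrl *ctrl = container_of(work, struct my_ctrl, probe_work);

            /* slow initialization runs here, off the probe path */
            (void)ctrl;
    }

    static int my_probe(void)
    {
            struct my_ctrl *ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);

            if (!ctrl)
                    return -ENOMEM;
            INIT_WORK(&ctrl->probe_work, my_async_probe);
            schedule_work(&ctrl->probe_work);  /* probe returns right away */
            return 0;
    }
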
+
 static void nvme_reset_notify(struct pci_dev *pdev, bool prepare)
 {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
@@ -2902,11 +3051,12 @@ static void nvme_remove(struct pci_dev *pdev)
        spin_unlock(&dev_list_lock);
 
        pci_set_drvdata(pdev, NULL);
+       flush_work(&dev->probe_work);
        flush_work(&dev->reset_work);
-       misc_deregister(&dev->miscdev);
        nvme_dev_shutdown(dev);
        nvme_dev_remove(dev);
        nvme_dev_remove_admin(dev);
+       device_destroy(nvme_class, MKDEV(nvme_char_major, dev->instance));
        nvme_free_queues(dev, 0);
        nvme_release_prp_pools(dev);
        kref_put(&dev->kref, nvme_free_dev);
@@ -2990,11 +3140,26 @@ static int __init nvme_init(void)
        else if (result > 0)
                nvme_major = result;
 
+       result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
+                                                       &nvme_dev_fops);
+       if (result < 0)
+               goto unregister_blkdev;
+       else if (result > 0)
+               nvme_char_major = result;
+
+       nvme_class = class_create(THIS_MODULE, "nvme");
+       if (IS_ERR(nvme_class)) {
+               result = PTR_ERR(nvme_class);
+               goto unregister_chrdev;
+       }
+
        result = pci_register_driver(&nvme_driver);
        if (result)
-               goto unregister_blkdev;
+               goto destroy_class;
        return 0;
 
+ destroy_class:
+       class_destroy(nvme_class);
+ unregister_chrdev:
+       __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
  unregister_blkdev:
        unregister_blkdev(nvme_major, "nvme");
  kill_workq:
@@ -3005,9 +3170,10 @@ static int __init nvme_init(void)
 static void __exit nvme_exit(void)
 {
        pci_unregister_driver(&nvme_driver);
-       unregister_hotcpu_notifier(&nvme_nb);
        unregister_blkdev(nvme_major, "nvme");
        destroy_workqueue(nvme_workq);
+       class_destroy(nvme_class);
+       __unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
        BUG_ON(nvme_thread && !IS_ERR(nvme_thread));
        _nvme_check_size();
 }
index 5e78568026c339da939a33acd54cbd80891c5a10..e10196e0182d450667cf886421e9475c218f30c8 100644 (file)
@@ -779,10 +779,8 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        struct nvme_dev *dev = ns->dev;
        dma_addr_t dma_addr;
        void *mem;
-       struct nvme_id_ctrl *id_ctrl;
        int res = SNTI_TRANSLATION_SUCCESS;
        int nvme_sc;
-       u8 ieee[4];
        int xfer_len;
        __be32 tmp_id = cpu_to_be32(ns->ns_id);
 
@@ -793,46 +791,60 @@ static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                goto out_dma;
        }
 
-       /* nvme controller identify */
-       nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
-       res = nvme_trans_status_code(hdr, nvme_sc);
-       if (res)
-               goto out_free;
-       if (nvme_sc) {
-               res = nvme_sc;
-               goto out_free;
-       }
-       id_ctrl = mem;
-
-       /* Since SCSI tried to save 4 bits... [SPC-4(r34) Table 591] */
-       ieee[0] = id_ctrl->ieee[0] << 4;
-       ieee[1] = id_ctrl->ieee[0] >> 4 | id_ctrl->ieee[1] << 4;
-       ieee[2] = id_ctrl->ieee[1] >> 4 | id_ctrl->ieee[2] << 4;
-       ieee[3] = id_ctrl->ieee[2] >> 4;
-
-       memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
+       memset(inq_response, 0, alloc_len);
        inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;    /* Page Code */
-       inq_response[3] = 20;      /* Page Length */
-       /* Designation Descriptor start */
-       inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
-       inq_response[5] = 0x03;    /* PIV=0b | Asso=00b | Designator Type=3h */
-       inq_response[6] = 0x00;    /* Rsvd */
-       inq_response[7] = 16;      /* Designator Length */
-       /* Designator start */
-       inq_response[8] = 0x60 | ieee[3]; /* NAA=6h | IEEE ID MSB, High nibble*/
-       inq_response[9] = ieee[2];        /* IEEE ID */
-       inq_response[10] = ieee[1];       /* IEEE ID */
-       inq_response[11] = ieee[0];       /* IEEE ID| Vendor Specific ID... */
-       inq_response[12] = (dev->pci_dev->vendor & 0xFF00) >> 8;
-       inq_response[13] = (dev->pci_dev->vendor & 0x00FF);
-       inq_response[14] = dev->serial[0];
-       inq_response[15] = dev->serial[1];
-       inq_response[16] = dev->model[0];
-       inq_response[17] = dev->model[1];
-       memcpy(&inq_response[18], &tmp_id, sizeof(u32));
-       /* Last 2 bytes are zero */
+       if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) {
+               struct nvme_id_ns *id_ns = mem;
+               void *eui = id_ns->eui64;
+               int len = sizeof(id_ns->eui64);
 
-       xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
+               nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
+               res = nvme_trans_status_code(hdr, nvme_sc);
+               if (res)
+                       goto out_free;
+               if (nvme_sc) {
+                       res = nvme_sc;
+                       goto out_free;
+               }
+
+               if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) {
+                       if (bitmap_empty(eui, len * 8)) {
+                               eui = id_ns->nguid;
+                               len = sizeof(id_ns->nguid);
+                       }
+               }
+               if (bitmap_empty(eui, len * 8))
+                       goto scsi_string;
+
+               inq_response[3] = 4 + len; /* Page Length */
+               /* Designation Descriptor start */
+               inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
+               inq_response[5] = 0x02;    /* PIV=0b | Asso=00b | Designator Type=2h */
+               inq_response[6] = 0x00;    /* Rsvd */
+               inq_response[7] = len;     /* Designator Length */
+               memcpy(&inq_response[8], eui, len);
+       } else {
+ scsi_string:
+               if (alloc_len < 72) {
+                       res = nvme_trans_completion(hdr,
+                                       SAM_STAT_CHECK_CONDITION,
+                                       ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
+                                       SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
+                       goto out_free;
+               }
+               inq_response[3] = 0x48;    /* Page Length */
+               /* Designation Descriptor start */
+               inq_response[4] = 0x03;    /* Proto ID=0h | Code set=3h */
+               inq_response[5] = 0x08;    /* PIV=0b | Asso=00b | Designator Type=8h */
+               inq_response[6] = 0x00;    /* Rsvd */
+               inq_response[7] = 0x44;    /* Designator Length */
+
+               sprintf(&inq_response[8], "%04x", dev->pci_dev->vendor);
+               memcpy(&inq_response[12], dev->model, sizeof(dev->model));
+               sprintf(&inq_response[52], "%04x", tmp_id);
+               memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
+       }
+       xfer_len = alloc_len;
        res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);
 
  out_free:
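
The rewritten Device Identification VPD page prefers a binary EUI-64 (or NGUID on NVMe 1.2) designator and falls back to a SCSI name string built from the PCI vendor ID, model, namespace ID, and serial. The magic offsets in the hunk follow the SPC-4 designation descriptor layout, roughly as in this orientation-only sketch (not a structure the driver defines):

    #include <linux/types.h>

    /* SPC-4 VPD page 83h designation descriptor header (sketch). */
    struct vpd83_designator {
            u8 code_set;            /* bits 7:4 protocol id, bits 3:0 code set */
            u8 type;                /* bit 7 PIV, bits 5:4 assoc, bits 3:0 designator type */
            u8 reserved;
            u8 length;              /* designator length in bytes */
            u8 designator[];        /* EUI-64/NGUID bytes or SCSI name string */
    };
    /* code set 1h = binary, 3h = UTF-8; type 2h = EUI-64, 8h = SCSI name string */
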
@@ -1600,7 +1612,7 @@ static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
                /* 10 Byte CDB */
                *bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
                        parm_list[MODE_SELECT_10_BD_OFFSET + 1];
-               *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &&
+               *llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
                                MODE_SELECT_10_LLBAA_MASK;
        } else {
                /* 6 Byte CDB */
@@ -2222,7 +2234,7 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        page_code = GET_INQ_PAGE_CODE(cmd);
        alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
 
-       inq_response = kmalloc(STANDARD_INQUIRY_LENGTH, GFP_KERNEL);
+       inq_response = kmalloc(alloc_len, GFP_KERNEL);
        if (inq_response == NULL) {
                res = -ENOMEM;
                goto out_mem;
index 8a86b62466f7ce72b54853b283e03fd495df8083..b40af3203089c846db053dfac879567230509299 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include <linux/module.h>
+#include <linux/blk-mq.h>
 #include <linux/fs.h>
 #include <linux/blkdev.h>
 #include <linux/slab.h>
@@ -340,9 +341,7 @@ struct rbd_device {
 
        char                    name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
 
-       struct list_head        rq_queue;       /* incoming rq queue */
        spinlock_t              lock;           /* queue, flags, open_count */
-       struct work_struct      rq_work;
 
        struct rbd_image_header header;
        unsigned long           flags;          /* possibly lock protected */
@@ -360,6 +359,9 @@ struct rbd_device {
        atomic_t                parent_ref;
        struct rbd_device       *parent;
 
+       /* Block layer tags. */
+       struct blk_mq_tag_set   tag_set;
+
        /* protects updating the header */
        struct rw_semaphore     header_rwsem;
 
@@ -1817,7 +1819,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
 
        /*
         * We support a 64-bit length, but ultimately it has to be
-        * passed to blk_end_request(), which takes an unsigned int.
+        * passed to the block layer, which just supports a 32-bit
+        * length field.
         */
        obj_request->xferred = osd_req->r_reply_op_len[0];
        rbd_assert(obj_request->xferred < (u64)UINT_MAX);
@@ -2275,7 +2278,10 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
                more = obj_request->which < img_request->obj_request_count - 1;
        } else {
                rbd_assert(img_request->rq != NULL);
-               more = blk_end_request(img_request->rq, result, xferred);
+
+               more = blk_update_request(img_request->rq, result, xferred);
+               if (!more)
+                       __blk_mq_end_request(img_request->rq, result);
        }
 
        return more;
@@ -3304,8 +3310,10 @@ out:
        return ret;
 }
 
-static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
+static void rbd_queue_workfn(struct work_struct *work)
 {
+       struct request *rq = blk_mq_rq_from_pdu(work);
+       struct rbd_device *rbd_dev = rq->q->queuedata;
        struct rbd_img_request *img_request;
        struct ceph_snap_context *snapc = NULL;
        u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
@@ -3314,6 +3322,13 @@ static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
        u64 mapping_size;
        int result;
 
+       if (rq->cmd_type != REQ_TYPE_FS) {
+               dout("%s: non-fs request type %d\n", __func__,
+                       (int) rq->cmd_type);
+               result = -EIO;
+               goto err;
+       }
+
        if (rq->cmd_flags & REQ_DISCARD)
                op_type = OBJ_OP_DISCARD;
        else if (rq->cmd_flags & REQ_WRITE)
@@ -3359,6 +3374,8 @@ static void rbd_handle_request(struct rbd_device *rbd_dev, struct request *rq)
                goto err_rq;    /* Shouldn't happen */
        }
 
+       blk_mq_start_request(rq);
+
        down_read(&rbd_dev->header_rwsem);
        mapping_size = rbd_dev->mapping.size;
        if (op_type != OBJ_OP_READ) {
@@ -3404,53 +3421,18 @@ err_rq:
                rbd_warn(rbd_dev, "%s %llx at %llx result %d",
                         obj_op_name(op_type), length, offset, result);
        ceph_put_snap_context(snapc);
-       blk_end_request_all(rq, result);
+err:
+       blk_mq_end_request(rq, result);
 }
 
-static void rbd_request_workfn(struct work_struct *work)
+static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+               const struct blk_mq_queue_data *bd)
 {
-       struct rbd_device *rbd_dev =
-           container_of(work, struct rbd_device, rq_work);
-       struct request *rq, *next;
-       LIST_HEAD(requests);
-
-       spin_lock_irq(&rbd_dev->lock); /* rq->q->queue_lock */
-       list_splice_init(&rbd_dev->rq_queue, &requests);
-       spin_unlock_irq(&rbd_dev->lock);
+       struct request *rq = bd->rq;
+       struct work_struct *work = blk_mq_rq_to_pdu(rq);
 
-       list_for_each_entry_safe(rq, next, &requests, queuelist) {
-               list_del_init(&rq->queuelist);
-               rbd_handle_request(rbd_dev, rq);
-       }
-}
-
-/*
- * Called with q->queue_lock held and interrupts disabled, possibly on
- * the way to schedule().  Do not sleep here!
- */
-static void rbd_request_fn(struct request_queue *q)
-{
-       struct rbd_device *rbd_dev = q->queuedata;
-       struct request *rq;
-       int queued = 0;
-
-       rbd_assert(rbd_dev);
-
-       while ((rq = blk_fetch_request(q))) {
-               /* Ignore any non-FS requests that filter through. */
-               if (rq->cmd_type != REQ_TYPE_FS) {
-                       dout("%s: non-fs request type %d\n", __func__,
-                               (int) rq->cmd_type);
-                       __blk_end_request_all(rq, 0);
-                       continue;
-               }
-
-               list_add_tail(&rq->queuelist, &rbd_dev->rq_queue);
-               queued++;
-       }
-
-       if (queued)
-               queue_work(rbd_wq, &rbd_dev->rq_work);
+       queue_work(rbd_wq, work);
+       return BLK_MQ_RQ_QUEUE_OK;
 }
 
 /*
@@ -3511,6 +3493,7 @@ static void rbd_free_disk(struct rbd_device *rbd_dev)
                del_gendisk(disk);
                if (disk->queue)
                        blk_cleanup_queue(disk->queue);
+               blk_mq_free_tag_set(&rbd_dev->tag_set);
        }
        put_disk(disk);
 }
@@ -3694,7 +3677,7 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
 
        ret = rbd_dev_header_info(rbd_dev);
        if (ret)
-               return ret;
+               goto out;
 
        /*
         * If there is a parent, see if it has disappeared due to the
@@ -3703,30 +3686,46 @@ static int rbd_dev_refresh(struct rbd_device *rbd_dev)
        if (rbd_dev->parent) {
                ret = rbd_dev_v2_parent_info(rbd_dev);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
        if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
-               if (rbd_dev->mapping.size != rbd_dev->header.image_size)
-                       rbd_dev->mapping.size = rbd_dev->header.image_size;
+               rbd_dev->mapping.size = rbd_dev->header.image_size;
        } else {
                /* validate mapped snapshot's EXISTS flag */
                rbd_exists_validate(rbd_dev);
        }
 
+out:
        up_write(&rbd_dev->header_rwsem);
-
-       if (mapping_size != rbd_dev->mapping.size)
+       if (!ret && mapping_size != rbd_dev->mapping.size)
                rbd_dev_update_size(rbd_dev);
 
+       return ret;
+}
+
+static int rbd_init_request(void *data, struct request *rq,
+               unsigned int hctx_idx, unsigned int request_idx,
+               unsigned int numa_node)
+{
+       struct work_struct *work = blk_mq_rq_to_pdu(rq);
+
+       INIT_WORK(work, rbd_queue_workfn);
        return 0;
 }
 
+static struct blk_mq_ops rbd_mq_ops = {
+       .queue_rq       = rbd_queue_rq,
+       .map_queue      = blk_mq_map_queue,
+       .init_request   = rbd_init_request,
+};
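
The blk-mq conversion sizes the per-request payload (pdu) to hold a struct work_struct, so queue_rq can simply hand each request to a workqueue and the work function recovers the request again. Stripped of rbd specifics, the round trip looks like this sketch (callback signatures as in this kernel version; the names are generic, not rbd's):

    #include <linux/blk-mq.h>
    #include <linux/workqueue.h>

    static void my_queue_workfn(struct work_struct *work)
    {
            struct request *rq = blk_mq_rq_from_pdu(work);  /* pdu -> request */

            /* ... issue the request, then complete it ... */
            blk_mq_end_request(rq, 0);
    }

    static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
    {
            queue_work(system_wq, blk_mq_rq_to_pdu(bd->rq));  /* request -> pdu */
            return BLK_MQ_RQ_QUEUE_OK;
    }

    static int my_init_request(void *data, struct request *rq,
                               unsigned int hctx_idx, unsigned int request_idx,
                               unsigned int numa_node)
    {
            struct work_struct *work = blk_mq_rq_to_pdu(rq);

            INIT_WORK(work, my_queue_workfn);
            return 0;
    }
    /* tag_set.cmd_size = sizeof(struct work_struct) reserves space for the pdu. */
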
+
 static int rbd_init_disk(struct rbd_device *rbd_dev)
 {
        struct gendisk *disk;
        struct request_queue *q;
        u64 segment_size;
+       int err;
 
        /* create gendisk info */
        disk = alloc_disk(single_major ?
@@ -3744,10 +3743,25 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        disk->fops = &rbd_bd_ops;
        disk->private_data = rbd_dev;
 
-       q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
-       if (!q)
+       memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
+       rbd_dev->tag_set.ops = &rbd_mq_ops;
+       rbd_dev->tag_set.queue_depth = BLKDEV_MAX_RQ;
+       rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
+       rbd_dev->tag_set.flags =
+               BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
+       rbd_dev->tag_set.nr_hw_queues = 1;
+       rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
+
+       err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
+       if (err)
                goto out_disk;
 
+       q = blk_mq_init_queue(&rbd_dev->tag_set);
+       if (IS_ERR(q)) {
+               err = PTR_ERR(q);
+               goto out_tag_set;
+       }
+
        /* We use the default size, but let's be explicit about it. */
        blk_queue_physical_block_size(q, SECTOR_SIZE);
 
@@ -3773,10 +3787,11 @@ static int rbd_init_disk(struct rbd_device *rbd_dev)
        rbd_dev->disk = disk;
 
        return 0;
+out_tag_set:
+       blk_mq_free_tag_set(&rbd_dev->tag_set);
 out_disk:
        put_disk(disk);
-
-       return -ENOMEM;
+       return err;
 }
 
 /*
@@ -4033,8 +4048,6 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
                return NULL;
 
        spin_lock_init(&rbd_dev->lock);
-       INIT_LIST_HEAD(&rbd_dev->rq_queue);
-       INIT_WORK(&rbd_dev->rq_work, rbd_request_workfn);
        rbd_dev->flags = 0;
        atomic_set(&rbd_dev->parent_ref, 0);
        INIT_LIST_HEAD(&rbd_dev->node);
@@ -4274,32 +4287,22 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
        }
 
        /*
-        * We always update the parent overlap.  If it's zero we
-        * treat it specially.
+        * We always update the parent overlap.  If it's zero we issue
+        * a warning, as we will proceed as if there was no parent.
         */
-       rbd_dev->parent_overlap = overlap;
        if (!overlap) {
-
-               /* A null parent_spec indicates it's the initial probe */
-
                if (parent_spec) {
-                       /*
-                        * The overlap has become zero, so the clone
-                        * must have been resized down to 0 at some
-                        * point.  Treat this the same as a flatten.
-                        */
-                       rbd_dev_parent_put(rbd_dev);
-                       pr_info("%s: clone image now standalone\n",
-                               rbd_dev->disk->disk_name);
+                       /* refresh, careful to warn just once */
+                       if (rbd_dev->parent_overlap)
+                               rbd_warn(rbd_dev,
+                                   "clone now standalone (overlap became 0)");
                } else {
-                       /*
-                        * For the initial probe, if we find the
-                        * overlap is zero we just pretend there was
-                        * no parent image.
-                        */
-                       rbd_warn(rbd_dev, "ignoring parent with overlap 0");
+                       /* initial probe */
+                       rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
                }
        }
+       rbd_dev->parent_overlap = overlap;
+
 out:
        ret = 0;
 out_err:
@@ -4770,36 +4773,6 @@ static inline size_t next_token(const char **buf)
        return strcspn(*buf, spaces);   /* Return token length */
 }
 
-/*
- * Finds the next token in *buf, and if the provided token buffer is
- * big enough, copies the found token into it.  The result, if
- * copied, is guaranteed to be terminated with '\0'.  Note that *buf
- * must be terminated with '\0' on entry.
- *
- * Returns the length of the token found (not including the '\0').
- * Return value will be 0 if no token is found, and it will be >=
- * token_size if the token would not fit.
- *
- * The *buf pointer will be updated to point beyond the end of the
- * found token.  Note that this occurs even if the token buffer is
- * too small to hold it.
- */
-static inline size_t copy_token(const char **buf,
-                               char *token,
-                               size_t token_size)
-{
-        size_t len;
-
-       len = next_token(buf);
-       if (len < token_size) {
-               memcpy(token, *buf, len);
-               *(token + len) = '\0';
-       }
-       *buf += len;
-
-        return len;
-}
-
 /*
  * Finds the next token in *buf, dynamically allocates a buffer big
  * enough to hold a copy of it, and copies the token into the new
index ec318bf434a6c3d890d26060a9c388295bde807e..1786574536b21ef06415ae62ff14bdbfd77b3f2c 100644 (file)
@@ -157,12 +157,16 @@ static int ipmi_release(struct inode *inode, struct file *file)
 {
        struct ipmi_file_private *priv = file->private_data;
        int                      rv;
+       struct  ipmi_recv_msg *msg, *next;
 
        rv = ipmi_destroy_user(priv->user);
        if (rv)
                return rv;
 
-       /* FIXME - free the messages in the list. */
+       list_for_each_entry_safe(msg, next, &priv->recv_msgs, link)
+               ipmi_free_recv_msg(msg);
+
+
        kfree(priv);
 
        return 0;
index 6b65fa4e0c5586895df2b26ee499c9e5ad4d8b2c..9bb592872532b1853efb00930c001e54df5fa7ed 100644 (file)
@@ -1483,14 +1483,10 @@ static inline void format_lan_msg(struct ipmi_smi_msg   *smi_msg,
        smi_msg->msgid = msgid;
 }
 
-static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
-                    struct ipmi_smi_msg *smi_msg, int priority)
+static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf,
+                                            struct ipmi_smi_msg *smi_msg,
+                                            int priority)
 {
-       int run_to_completion = intf->run_to_completion;
-       unsigned long flags;
-
-       if (!run_to_completion)
-               spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
        if (intf->curr_msg) {
                if (priority > 0)
                        list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
@@ -1500,8 +1496,25 @@ static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
        } else {
                intf->curr_msg = smi_msg;
        }
-       if (!run_to_completion)
+
+       return smi_msg;
+}
+
+
+static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
+                    struct ipmi_smi_msg *smi_msg, int priority)
+{
+       int run_to_completion = intf->run_to_completion;
+
+       if (run_to_completion) {
+               smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+       } else {
+               unsigned long flags;
+
+               spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+               smi_msg = smi_add_send_msg(intf, smi_msg, priority);
                spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
+       }
 
        if (smi_msg)
                handlers->sender(intf->send_info, smi_msg);
@@ -1985,7 +1998,9 @@ static int smi_ipmb_proc_show(struct seq_file *m, void *v)
        seq_printf(m, "%x", intf->channels[0].address);
        for (i = 1; i < IPMI_MAX_CHANNELS; i++)
                seq_printf(m, " %x", intf->channels[i].address);
-       return seq_putc(m, '\n');
+       seq_putc(m, '\n');
+
+       return seq_has_overflowed(m);
 }
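
This hunk, and the similar ones in the other ipmi proc show functions below, are the same mechanical conversion: the return value of seq_printf()/seq_puts() is being phased out, so show callbacks now print and then report the buffer state with seq_has_overflowed(). In the abstract (a sketch, not any particular file):

    #include <linux/seq_file.h>

    /* Before: relied on the return value of seq_printf(). */
    static int old_show(struct seq_file *m, void *v)
    {
            return seq_printf(m, "%d\n", 42);
    }

    /* After: print, then ask the seq_file whether its buffer overflowed. */
    static int new_show(struct seq_file *m, void *v)
    {
            seq_printf(m, "%d\n", 42);
            return seq_has_overflowed(m);
    }
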
 
 static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
@@ -2004,9 +2019,11 @@ static int smi_version_proc_show(struct seq_file *m, void *v)
 {
        ipmi_smi_t intf = m->private;
 
-       return seq_printf(m, "%u.%u\n",
-                      ipmi_version_major(&intf->bmc->id),
-                      ipmi_version_minor(&intf->bmc->id));
+       seq_printf(m, "%u.%u\n",
+                  ipmi_version_major(&intf->bmc->id),
+                  ipmi_version_minor(&intf->bmc->id));
+
+       return seq_has_overflowed(m);
 }
 
 static int smi_version_proc_open(struct inode *inode, struct file *file)
@@ -2353,11 +2370,28 @@ static struct attribute *bmc_dev_attrs[] = {
        &dev_attr_additional_device_support.attr,
        &dev_attr_manufacturer_id.attr,
        &dev_attr_product_id.attr,
+       &dev_attr_aux_firmware_revision.attr,
+       &dev_attr_guid.attr,
        NULL
 };
 
+static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
+                                      struct attribute *attr, int idx)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct bmc_device *bmc = to_bmc_device(dev);
+       umode_t mode = attr->mode;
+
+       if (attr == &dev_attr_aux_firmware_revision.attr)
+               return bmc->id.aux_firmware_revision_set ? mode : 0;
+       if (attr == &dev_attr_guid.attr)
+               return bmc->guid_set ? mode : 0;
+       return mode;
+}
+
 static struct attribute_group bmc_dev_attr_group = {
        .attrs          = bmc_dev_attrs,
+       .is_visible     = bmc_dev_attr_is_visible,
 };
 
 static const struct attribute_group *bmc_dev_attr_groups[] = {
@@ -2380,13 +2414,6 @@ cleanup_bmc_device(struct kref *ref)
 {
        struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
 
-       if (bmc->id.aux_firmware_revision_set)
-               device_remove_file(&bmc->pdev.dev,
-                                  &dev_attr_aux_firmware_revision);
-       if (bmc->guid_set)
-               device_remove_file(&bmc->pdev.dev,
-                                  &dev_attr_guid);
-
        platform_device_unregister(&bmc->pdev);
 }
 
@@ -2407,33 +2434,6 @@ static void ipmi_bmc_unregister(ipmi_smi_t intf)
        mutex_unlock(&ipmidriver_mutex);
 }
 
-static int create_bmc_files(struct bmc_device *bmc)
-{
-       int err;
-
-       if (bmc->id.aux_firmware_revision_set) {
-               err = device_create_file(&bmc->pdev.dev,
-                                        &dev_attr_aux_firmware_revision);
-               if (err)
-                       goto out;
-       }
-       if (bmc->guid_set) {
-               err = device_create_file(&bmc->pdev.dev,
-                                        &dev_attr_guid);
-               if (err)
-                       goto out_aux_firm;
-       }
-
-       return 0;
-
-out_aux_firm:
-       if (bmc->id.aux_firmware_revision_set)
-               device_remove_file(&bmc->pdev.dev,
-                                  &dev_attr_aux_firmware_revision);
-out:
-       return err;
-}
-
 static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
 {
        int               rv;
@@ -2522,15 +2522,6 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
                        return rv;
                }
 
-               rv = create_bmc_files(bmc);
-               if (rv) {
-                       mutex_lock(&ipmidriver_mutex);
-                       platform_device_unregister(&bmc->pdev);
-                       mutex_unlock(&ipmidriver_mutex);
-
-                       return rv;
-               }
-
                dev_info(intf->si_dev, "Found new BMC (man_id: 0x%6.6x, "
                         "prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
                         bmc->id.manufacturer_id,
@@ -4212,7 +4203,6 @@ static void need_waiter(ipmi_smi_t intf)
 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
 
-/* FIXME - convert these to slabs. */
 static void free_smi_msg(struct ipmi_smi_msg *msg)
 {
        atomic_dec(&smi_msg_inuse_count);
index 967b73aa4e66d31481d6ae9488ab567530661f76..f6646ed3047e09a3656b089491f0afaa14982af1 100644 (file)
@@ -321,6 +321,18 @@ static int try_smi_init(struct smi_info *smi);
 static void cleanup_one_si(struct smi_info *to_clean);
 static void cleanup_ipmi_si(void);
 
+#ifdef DEBUG_TIMING
+void debug_timestamp(char *msg)
+{
+       struct timespec64 t;
+
+       getnstimeofday64(&t);
+       pr_debug("**%s: %lld.%9.9ld\n", msg, (long long) t.tv_sec, t.tv_nsec);
+}
+#else
+#define debug_timestamp(x)
+#endif
+
 static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
 static int register_xaction_notifier(struct notifier_block *nb)
 {
@@ -358,9 +370,6 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 {
        int              rv;
-#ifdef DEBUG_TIMING
-       struct timeval t;
-#endif
 
        if (!smi_info->waiting_msg) {
                smi_info->curr_msg = NULL;
@@ -370,10 +379,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 
                smi_info->curr_msg = smi_info->waiting_msg;
                smi_info->waiting_msg = NULL;
-#ifdef DEBUG_TIMING
-               do_gettimeofday(&t);
-               printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+               debug_timestamp("Start2");
                err = atomic_notifier_call_chain(&xaction_notifier_list,
                                0, smi_info);
                if (err & NOTIFY_STOP_MASK) {
@@ -582,12 +588,8 @@ static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
 static void handle_transaction_done(struct smi_info *smi_info)
 {
        struct ipmi_smi_msg *msg;
-#ifdef DEBUG_TIMING
-       struct timeval t;
 
-       do_gettimeofday(&t);
-       printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+       debug_timestamp("Done");
        switch (smi_info->si_state) {
        case SI_NORMAL:
                if (!smi_info->curr_msg)
@@ -929,24 +931,15 @@ static void sender(void                *send_info,
        struct smi_info   *smi_info = send_info;
        enum si_sm_result result;
        unsigned long     flags;
-#ifdef DEBUG_TIMING
-       struct timeval    t;
-#endif
-
-       BUG_ON(smi_info->waiting_msg);
-       smi_info->waiting_msg = msg;
 
-#ifdef DEBUG_TIMING
-       do_gettimeofday(&t);
-       printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+       debug_timestamp("Enqueue");
 
        if (smi_info->run_to_completion) {
                /*
                 * If we are running to completion, start it and run
                 * transactions until everything is clear.
                 */
-               smi_info->curr_msg = smi_info->waiting_msg;
+               smi_info->curr_msg = msg;
                smi_info->waiting_msg = NULL;
 
                /*
@@ -964,6 +957,15 @@ static void sender(void                *send_info,
        }
 
        spin_lock_irqsave(&smi_info->si_lock, flags);
+       /*
+        * The following two lines don't need to be under the lock for
+        * the lock's sake, but they do need SMP memory barriers to
+        * avoid getting things out of order.  We are already claiming
+        * the lock, anyway, so just do it under the lock to avoid the
+        * ordering problem.
+        */
+       BUG_ON(smi_info->waiting_msg);
+       smi_info->waiting_msg = msg;
        check_start_timer_thread(smi_info);
        spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
@@ -989,18 +991,18 @@ static void set_run_to_completion(void *send_info, bool i_run_to_completion)
  * we are spinning in kipmid looking for something and not delaying
  * between checks
  */
-static inline void ipmi_si_set_not_busy(struct timespec *ts)
+static inline void ipmi_si_set_not_busy(struct timespec64 *ts)
 {
        ts->tv_nsec = -1;
 }
-static inline int ipmi_si_is_busy(struct timespec *ts)
+static inline int ipmi_si_is_busy(struct timespec64 *ts)
 {
        return ts->tv_nsec != -1;
 }
 
 static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
                                        const struct smi_info *smi_info,
-                                       struct timespec *busy_until)
+                                       struct timespec64 *busy_until)
 {
        unsigned int max_busy_us = 0;
 
@@ -1009,12 +1011,13 @@ static inline int ipmi_thread_busy_wait(enum si_sm_result smi_result,
        if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
                ipmi_si_set_not_busy(busy_until);
        else if (!ipmi_si_is_busy(busy_until)) {
-               getnstimeofday(busy_until);
-               timespec_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
+               getnstimeofday64(busy_until);
+               timespec64_add_ns(busy_until, max_busy_us*NSEC_PER_USEC);
        } else {
-               struct timespec now;
-               getnstimeofday(&now);
-               if (unlikely(timespec_compare(&now, busy_until) > 0)) {
+               struct timespec64 now;
+
+               getnstimeofday64(&now);
+               if (unlikely(timespec64_compare(&now, busy_until) > 0)) {
                        ipmi_si_set_not_busy(busy_until);
                        return 0;
                }
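
The kipmid busy-wait bookkeeping is switched from struct timespec to struct timespec64 so the deadline arithmetic stays correct past 2038. Pulled out of context, the deadline pattern is roughly the sketch below (the real code derives max_busy_us from a module parameter, and tv_nsec == -1 marks "not busy"):

    #include <linux/time64.h>
    #include <linux/timekeeping.h>
    #include <linux/types.h>

    /* Sketch: arm a busy-until deadline, then test it on later calls. */
    static bool busy_window_expired(struct timespec64 *busy_until,
                                    unsigned int max_busy_us)
    {
            struct timespec64 now;

            if (busy_until->tv_nsec == -1) {  /* not armed yet */
                    getnstimeofday64(busy_until);
                    timespec64_add_ns(busy_until, max_busy_us * NSEC_PER_USEC);
                    return false;
            }
            getnstimeofday64(&now);
            return timespec64_compare(&now, busy_until) > 0;
    }
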
@@ -1037,7 +1040,7 @@ static int ipmi_thread(void *data)
        struct smi_info *smi_info = data;
        unsigned long flags;
        enum si_sm_result smi_result;
-       struct timespec busy_until;
+       struct timespec64 busy_until;
 
        ipmi_si_set_not_busy(&busy_until);
        set_user_nice(current, MAX_NICE);
@@ -1128,15 +1131,10 @@ static void smi_timeout(unsigned long data)
        unsigned long     jiffies_now;
        long              time_diff;
        long              timeout;
-#ifdef DEBUG_TIMING
-       struct timeval    t;
-#endif
 
        spin_lock_irqsave(&(smi_info->si_lock), flags);
-#ifdef DEBUG_TIMING
-       do_gettimeofday(&t);
-       printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+       debug_timestamp("Timer");
+
        jiffies_now = jiffies;
        time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
                     * SI_USEC_PER_JIFFY);
@@ -1173,18 +1171,13 @@ static irqreturn_t si_irq_handler(int irq, void *data)
 {
        struct smi_info *smi_info = data;
        unsigned long   flags;
-#ifdef DEBUG_TIMING
-       struct timeval  t;
-#endif
 
        spin_lock_irqsave(&(smi_info->si_lock), flags);
 
        smi_inc_stat(smi_info, interrupts);
 
-#ifdef DEBUG_TIMING
-       do_gettimeofday(&t);
-       printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+       debug_timestamp("Interrupt");
+
        smi_event_handler(smi_info, 0);
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
        return IRQ_HANDLED;
@@ -2038,18 +2031,13 @@ static u32 ipmi_acpi_gpe(acpi_handle gpe_device,
 {
        struct smi_info *smi_info = context;
        unsigned long   flags;
-#ifdef DEBUG_TIMING
-       struct timeval t;
-#endif
 
        spin_lock_irqsave(&(smi_info->si_lock), flags);
 
        smi_inc_stat(smi_info, interrupts);
 
-#ifdef DEBUG_TIMING
-       do_gettimeofday(&t);
-       printk("**ACPI_GPE: %d.%9.9d\n", t.tv_sec, t.tv_usec);
-#endif
+       debug_timestamp("ACPI_GPE");
+
        smi_event_handler(smi_info, 0);
        spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 
@@ -2071,7 +2059,6 @@ static int acpi_gpe_irq_setup(struct smi_info *info)
        if (!info->irq)
                return 0;
 
-       /* FIXME - is level triggered right? */
        status = acpi_install_gpe_handler(NULL,
                                          info->irq,
                                          ACPI_GPE_LEVEL_TRIGGERED,
@@ -2998,7 +2985,9 @@ static int smi_type_proc_show(struct seq_file *m, void *v)
 {
        struct smi_info *smi = m->private;
 
-       return seq_printf(m, "%s\n", si_to_str[smi->si_type]);
+       seq_printf(m, "%s\n", si_to_str[smi->si_type]);
+
+       return seq_has_overflowed(m);
 }
 
 static int smi_type_proc_open(struct inode *inode, struct file *file)
@@ -3060,16 +3049,18 @@ static int smi_params_proc_show(struct seq_file *m, void *v)
 {
        struct smi_info *smi = m->private;
 
-       return seq_printf(m,
-                      "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
-                      si_to_str[smi->si_type],
-                      addr_space_to_str[smi->io.addr_type],
-                      smi->io.addr_data,
-                      smi->io.regspacing,
-                      smi->io.regsize,
-                      smi->io.regshift,
-                      smi->irq,
-                      smi->slave_addr);
+       seq_printf(m,
+                  "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
+                  si_to_str[smi->si_type],
+                  addr_space_to_str[smi->io.addr_type],
+                  smi->io.addr_data,
+                  smi->io.regspacing,
+                  smi->io.regsize,
+                  smi->io.regshift,
+                  smi->irq,
+                  smi->slave_addr);
+
+       return seq_has_overflowed(m);
 }
 
 static int smi_params_proc_open(struct inode *inode, struct file *file)
index 982b96323f823b8402ede2ceec7c0cb85c042ec4..f6e378dac5f5b1031530839d5967ab96607f1f7b 100644 (file)
@@ -1097,8 +1097,6 @@ static int ssif_remove(struct i2c_client *client)
        if (!ssif_info)
                return 0;
 
-       i2c_set_clientdata(client, NULL);
-
        /*
         * After this point, we won't deliver anything asynchronously
         * to the message handler.  We can unregister ourself.
@@ -1198,7 +1196,9 @@ static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
 
 static int smi_type_proc_show(struct seq_file *m, void *v)
 {
-       return seq_puts(m, "ssif\n");
+       seq_puts(m, "ssif\n");
+
+       return seq_has_overflowed(m);
 }
 
 static int smi_type_proc_open(struct inode *inode, struct file *file)
index 91f86131bb7aa62b0c4632e2defa1f8b2f4e6abc..0b474a04730fe4d2c588cb1a3a1818dbff61f995 100644 (file)
@@ -102,12 +102,12 @@ config COMMON_CLK_AXI_CLKGEN
          Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx
          FPGAs. It is commonly used in Analog Devices' reference designs.
 
-config CLK_PPC_CORENET
-       bool "Clock driver for PowerPC corenet platforms"
-       depends on PPC_E500MC && OF
+config CLK_QORIQ
+       bool "Clock driver for Freescale QorIQ platforms"
+       depends on (PPC_E500MC || ARM) && OF
        ---help---
-         This adds the clock driver support for Freescale PowerPC corenet
-         platforms using common clock framework.
+         This adds the clock driver support for Freescale QorIQ platforms
+         using common clock framework.
 
 config COMMON_CLK_XGENE
        bool "Clock driver for APM XGene SoC"
@@ -135,6 +135,14 @@ config COMMON_CLK_PXA
        ---help---
           Support for the Marvell PXA SoC.
 
+config COMMON_CLK_CDCE706
+       tristate "Clock driver for TI CDCE706 clock synthesizer"
+       depends on I2C
+       select REGMAP_I2C
+       select RATIONAL
+       ---help---
+         This driver supports TI CDCE706 programmable 3-PLL clock synthesizer.
+
 source "drivers/clk/qcom/Kconfig"
 
 endmenu
index d5fba5bc6e1bc1f07991be58f367c30a253a58f7..d478ceb69c5fc6b4a1bee49276b37763db36c79d 100644 (file)
@@ -16,9 +16,11 @@ endif
 
 # hardware specific clock types
 # please keep this section sorted lexicographically by file/directory path name
+obj-$(CONFIG_MACH_ASM9260)             += clk-asm9260.o
 obj-$(CONFIG_COMMON_CLK_AXI_CLKGEN)    += clk-axi-clkgen.o
 obj-$(CONFIG_ARCH_AXXIA)               += clk-axm5516.o
 obj-$(CONFIG_ARCH_BCM2835)             += clk-bcm2835.o
+obj-$(CONFIG_COMMON_CLK_CDCE706)       += clk-cdce706.o
 obj-$(CONFIG_ARCH_CLPS711X)            += clk-clps711x.o
 obj-$(CONFIG_ARCH_EFM32)               += clk-efm32gg.o
 obj-$(CONFIG_ARCH_HIGHBANK)            += clk-highbank.o
@@ -30,7 +32,7 @@ obj-$(CONFIG_ARCH_MOXART)             += clk-moxart.o
 obj-$(CONFIG_ARCH_NOMADIK)             += clk-nomadik.o
 obj-$(CONFIG_ARCH_NSPIRE)              += clk-nspire.o
 obj-$(CONFIG_COMMON_CLK_PALMAS)                += clk-palmas.o
-obj-$(CONFIG_CLK_PPC_CORENET)          += clk-ppc-corenet.o
+obj-$(CONFIG_CLK_QORIQ)                        += clk-qoriq.o
 obj-$(CONFIG_COMMON_CLK_RK808)         += clk-rk808.o
 obj-$(CONFIG_COMMON_CLK_S2MPS11)       += clk-s2mps11.o
 obj-$(CONFIG_COMMON_CLK_SI5351)                += clk-si5351.o
index bbdb1b985c9146a5e82013fd8d8ab20ea3ebc5fc..86c8a073dcc32a20b98f4c862d5622b11ffe1d57 100644 (file)
@@ -56,6 +56,8 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
 
 static long clk_programmable_determine_rate(struct clk_hw *hw,
                                            unsigned long rate,
+                                           unsigned long min_rate,
+                                           unsigned long max_rate,
                                            unsigned long *best_parent_rate,
                                            struct clk_hw **best_parent_hw)
 {
index 1c06f6f3a8c59959b90e90f554c048e1535a1893..05abae89262e20923f5f39321c33165bdab6ea7f 100644 (file)
@@ -1032,6 +1032,8 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
 }
 
 static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long min_rate,
+               unsigned long max_rate,
                unsigned long *best_parent_rate, struct clk_hw **best_parent)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
diff --git a/drivers/clk/clk-asm9260.c b/drivers/clk/clk-asm9260.c
new file mode 100644 (file)
index 0000000..88f4ff6
--- /dev/null
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2014 Oleksij Rempel <linux@rempel-privat.de>.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/clk-provider.h>
+#include <linux/spinlock.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <dt-bindings/clock/alphascale,asm9260.h>
+
+#define HW_AHBCLKCTRL0         0x0020
+#define HW_AHBCLKCTRL1         0x0030
+#define HW_SYSPLLCTRL          0x0100
+#define HW_MAINCLKSEL          0x0120
+#define HW_MAINCLKUEN          0x0124
+#define HW_UARTCLKSEL          0x0128
+#define HW_UARTCLKUEN          0x012c
+#define HW_I2S0CLKSEL          0x0130
+#define HW_I2S0CLKUEN          0x0134
+#define HW_I2S1CLKSEL          0x0138
+#define HW_I2S1CLKUEN          0x013c
+#define HW_WDTCLKSEL           0x0160
+#define HW_WDTCLKUEN           0x0164
+#define HW_CLKOUTCLKSEL                0x0170
+#define HW_CLKOUTCLKUEN                0x0174
+#define HW_CPUCLKDIV           0x017c
+#define HW_SYSAHBCLKDIV                0x0180
+#define HW_I2S0MCLKDIV         0x0190
+#define HW_I2S0SCLKDIV         0x0194
+#define HW_I2S1MCLKDIV         0x0188
+#define HW_I2S1SCLKDIV         0x018c
+#define HW_UART0CLKDIV         0x0198
+#define HW_UART1CLKDIV         0x019c
+#define HW_UART2CLKDIV         0x01a0
+#define HW_UART3CLKDIV         0x01a4
+#define HW_UART4CLKDIV         0x01a8
+#define HW_UART5CLKDIV         0x01ac
+#define HW_UART6CLKDIV         0x01b0
+#define HW_UART7CLKDIV         0x01b4
+#define HW_UART8CLKDIV         0x01b8
+#define HW_UART9CLKDIV         0x01bc
+#define HW_SPI0CLKDIV          0x01c0
+#define HW_SPI1CLKDIV          0x01c4
+#define HW_QUADSPICLKDIV       0x01c8
+#define HW_SSP0CLKDIV          0x01d0
+#define HW_NANDCLKDIV          0x01d4
+#define HW_TRACECLKDIV         0x01e0
+#define HW_CAMMCLKDIV          0x01e8
+#define HW_WDTCLKDIV           0x01ec
+#define HW_CLKOUTCLKDIV                0x01f4
+#define HW_MACCLKDIV           0x01f8
+#define HW_LCDCLKDIV           0x01fc
+#define HW_ADCANACLKDIV                0x0200
+
+static struct clk *clks[MAX_CLKS];
+static struct clk_onecell_data clk_data;
+static DEFINE_SPINLOCK(asm9260_clk_lock);
+
+struct asm9260_div_clk {
+       unsigned int idx;
+       const char *name;
+       const char *parent_name;
+       u32 reg;
+};
+
+struct asm9260_gate_data {
+       unsigned int idx;
+       const char *name;
+       const char *parent_name;
+       u32 reg;
+       u8 bit_idx;
+       unsigned long flags;
+};
+
+struct asm9260_mux_clock {
+       u8                      mask;
+       u32                     *table;
+       const char              *name;
+       const char              **parent_names;
+       u8                      num_parents;
+       unsigned long           offset;
+       unsigned long           flags;
+};
+
+static void __iomem *base;
+
+static const struct asm9260_div_clk asm9260_div_clks[] __initconst = {
+       { CLKID_SYS_CPU,        "cpu_div", "main_gate", HW_CPUCLKDIV },
+       { CLKID_SYS_AHB,        "ahb_div", "cpu_div", HW_SYSAHBCLKDIV },
+
+       /* i2s has two dividers: one only for the external mclk and an
+        * internal divider for all clocks. */
+       { CLKID_SYS_I2S0M,      "i2s0m_div", "i2s0_mclk",  HW_I2S0MCLKDIV },
+       { CLKID_SYS_I2S1M,      "i2s1m_div", "i2s1_mclk",  HW_I2S1MCLKDIV },
+       { CLKID_SYS_I2S0S,      "i2s0s_div", "i2s0_gate",  HW_I2S0SCLKDIV },
+       { CLKID_SYS_I2S1S,      "i2s1s_div", "i2s0_gate",  HW_I2S1SCLKDIV },
+
+       { CLKID_SYS_UART0,      "uart0_div", "uart_gate", HW_UART0CLKDIV },
+       { CLKID_SYS_UART1,      "uart1_div", "uart_gate", HW_UART1CLKDIV },
+       { CLKID_SYS_UART2,      "uart2_div", "uart_gate", HW_UART2CLKDIV },
+       { CLKID_SYS_UART3,      "uart3_div", "uart_gate", HW_UART3CLKDIV },
+       { CLKID_SYS_UART4,      "uart4_div", "uart_gate", HW_UART4CLKDIV },
+       { CLKID_SYS_UART5,      "uart5_div", "uart_gate", HW_UART5CLKDIV },
+       { CLKID_SYS_UART6,      "uart6_div", "uart_gate", HW_UART6CLKDIV },
+       { CLKID_SYS_UART7,      "uart7_div", "uart_gate", HW_UART7CLKDIV },
+       { CLKID_SYS_UART8,      "uart8_div", "uart_gate", HW_UART8CLKDIV },
+       { CLKID_SYS_UART9,      "uart9_div", "uart_gate", HW_UART9CLKDIV },
+
+       { CLKID_SYS_SPI0,       "spi0_div",     "main_gate", HW_SPI0CLKDIV },
+       { CLKID_SYS_SPI1,       "spi1_div",     "main_gate", HW_SPI1CLKDIV },
+       { CLKID_SYS_QUADSPI,    "quadspi_div",  "main_gate", HW_QUADSPICLKDIV },
+       { CLKID_SYS_SSP0,       "ssp0_div",     "main_gate", HW_SSP0CLKDIV },
+       { CLKID_SYS_NAND,       "nand_div",     "main_gate", HW_NANDCLKDIV },
+       { CLKID_SYS_TRACE,      "trace_div",    "main_gate", HW_TRACECLKDIV },
+       { CLKID_SYS_CAMM,       "camm_div",     "main_gate", HW_CAMMCLKDIV },
+       { CLKID_SYS_MAC,        "mac_div",      "main_gate", HW_MACCLKDIV },
+       { CLKID_SYS_LCD,        "lcd_div",      "main_gate", HW_LCDCLKDIV },
+       { CLKID_SYS_ADCANA,     "adcana_div",   "main_gate", HW_ADCANACLKDIV },
+
+       { CLKID_SYS_WDT,        "wdt_div",      "wdt_gate",    HW_WDTCLKDIV },
+       { CLKID_SYS_CLKOUT,     "clkout_div",   "clkout_gate", HW_CLKOUTCLKDIV },
+};
+
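+/* Gates on the *CLKUEN ("update enable") registers; they sit between each
+ * mux and its divider and are not exported through the clk provider, so the
+ * idx field is left at 0. */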
+static const struct asm9260_gate_data asm9260_mux_gates[] __initconst = {
+       { 0, "main_gate",       "main_mux",     HW_MAINCLKUEN,  0 },
+       { 0, "uart_gate",       "uart_mux",     HW_UARTCLKUEN,  0 },
+       { 0, "i2s0_gate",       "i2s0_mux",     HW_I2S0CLKUEN,  0 },
+       { 0, "i2s1_gate",       "i2s1_mux",     HW_I2S1CLKUEN,  0 },
+       { 0, "wdt_gate",        "wdt_mux",      HW_WDTCLKUEN,   0 },
+       { 0, "clkout_gate",     "clkout_mux",   HW_CLKOUTCLKUEN, 0 },
+};
+static const struct asm9260_gate_data asm9260_ahb_gates[] __initconst = {
+       /* ahb gates */
+       { CLKID_AHB_ROM,        "rom",          "ahb_div",
+               HW_AHBCLKCTRL0, 1, CLK_IGNORE_UNUSED},
+       { CLKID_AHB_RAM,        "ram",          "ahb_div",
+               HW_AHBCLKCTRL0, 2, CLK_IGNORE_UNUSED},
+       { CLKID_AHB_GPIO,       "gpio",         "ahb_div",
+               HW_AHBCLKCTRL0, 4 },
+       { CLKID_AHB_MAC,        "mac",          "ahb_div",
+               HW_AHBCLKCTRL0, 5 },
+       { CLKID_AHB_EMI,        "emi",          "ahb_div",
+               HW_AHBCLKCTRL0, 6, CLK_IGNORE_UNUSED},
+       { CLKID_AHB_USB0,       "usb0",         "ahb_div",
+               HW_AHBCLKCTRL0, 7 },
+       { CLKID_AHB_USB1,       "usb1",         "ahb_div",
+               HW_AHBCLKCTRL0, 8 },
+       { CLKID_AHB_DMA0,       "dma0",         "ahb_div",
+               HW_AHBCLKCTRL0, 9 },
+       { CLKID_AHB_DMA1,       "dma1",         "ahb_div",
+               HW_AHBCLKCTRL0, 10 },
+       { CLKID_AHB_UART0,      "uart0",        "ahb_div",
+               HW_AHBCLKCTRL0, 11 },
+       { CLKID_AHB_UART1,      "uart1",        "ahb_div",
+               HW_AHBCLKCTRL0, 12 },
+       { CLKID_AHB_UART2,      "uart2",        "ahb_div",
+               HW_AHBCLKCTRL0, 13 },
+       { CLKID_AHB_UART3,      "uart3",        "ahb_div",
+               HW_AHBCLKCTRL0, 14 },
+       { CLKID_AHB_UART4,      "uart4",        "ahb_div",
+               HW_AHBCLKCTRL0, 15 },
+       { CLKID_AHB_UART5,      "uart5",        "ahb_div",
+               HW_AHBCLKCTRL0, 16 },
+       { CLKID_AHB_UART6,      "uart6",        "ahb_div",
+               HW_AHBCLKCTRL0, 17 },
+       { CLKID_AHB_UART7,      "uart7",        "ahb_div",
+               HW_AHBCLKCTRL0, 18 },
+       { CLKID_AHB_UART8,      "uart8",        "ahb_div",
+               HW_AHBCLKCTRL0, 19 },
+       { CLKID_AHB_UART9,      "uart9",        "ahb_div",
+               HW_AHBCLKCTRL0, 20 },
+       { CLKID_AHB_I2S0,       "i2s0",         "ahb_div",
+               HW_AHBCLKCTRL0, 21 },
+       { CLKID_AHB_I2C0,       "i2c0",         "ahb_div",
+               HW_AHBCLKCTRL0, 22 },
+       { CLKID_AHB_I2C1,       "i2c1",         "ahb_div",
+               HW_AHBCLKCTRL0, 23 },
+       { CLKID_AHB_SSP0,       "ssp0",         "ahb_div",
+               HW_AHBCLKCTRL0, 24 },
+       { CLKID_AHB_IOCONFIG,   "ioconf",       "ahb_div",
+               HW_AHBCLKCTRL0, 25 },
+       { CLKID_AHB_WDT,        "wdt",          "ahb_div",
+               HW_AHBCLKCTRL0, 26 },
+       { CLKID_AHB_CAN0,       "can0",         "ahb_div",
+               HW_AHBCLKCTRL0, 27 },
+       { CLKID_AHB_CAN1,       "can1",         "ahb_div",
+               HW_AHBCLKCTRL0, 28 },
+       { CLKID_AHB_MPWM,       "mpwm",         "ahb_div",
+               HW_AHBCLKCTRL0, 29 },
+       { CLKID_AHB_SPI0,       "spi0",         "ahb_div",
+               HW_AHBCLKCTRL0, 30 },
+       { CLKID_AHB_SPI1,       "spi1",         "ahb_div",
+               HW_AHBCLKCTRL0, 31 },
+
+       { CLKID_AHB_QEI,        "qei",          "ahb_div",
+               HW_AHBCLKCTRL1, 0 },
+       { CLKID_AHB_QUADSPI0,   "quadspi0",     "ahb_div",
+               HW_AHBCLKCTRL1, 1 },
+       { CLKID_AHB_CAMIF,      "capmif",       "ahb_div",
+               HW_AHBCLKCTRL1, 2 },
+       { CLKID_AHB_LCDIF,      "lcdif",        "ahb_div",
+               HW_AHBCLKCTRL1, 3 },
+       { CLKID_AHB_TIMER0,     "timer0",       "ahb_div",
+               HW_AHBCLKCTRL1, 4 },
+       { CLKID_AHB_TIMER1,     "timer1",       "ahb_div",
+               HW_AHBCLKCTRL1, 5 },
+       { CLKID_AHB_TIMER2,     "timer2",       "ahb_div",
+               HW_AHBCLKCTRL1, 6 },
+       { CLKID_AHB_TIMER3,     "timer3",       "ahb_div",
+               HW_AHBCLKCTRL1, 7 },
+       { CLKID_AHB_IRQ,        "irq",          "ahb_div",
+               HW_AHBCLKCTRL1, 8, CLK_IGNORE_UNUSED},
+       { CLKID_AHB_RTC,        "rtc",          "ahb_div",
+               HW_AHBCLKCTRL1, 9 },
+       { CLKID_AHB_NAND,       "nand",         "ahb_div",
+               HW_AHBCLKCTRL1, 10 },
+       { CLKID_AHB_ADC0,       "adc0",         "ahb_div",
+               HW_AHBCLKCTRL1, 11 },
+       { CLKID_AHB_LED,        "led",          "ahb_div",
+               HW_AHBCLKCTRL1, 12 },
+       { CLKID_AHB_DAC0,       "dac0",         "ahb_div",
+               HW_AHBCLKCTRL1, 13 },
+       { CLKID_AHB_LCD,        "lcd",          "ahb_div",
+               HW_AHBCLKCTRL1, 14 },
+       { CLKID_AHB_I2S1,       "i2s1",         "ahb_div",
+               HW_AHBCLKCTRL1, 15 },
+       { CLKID_AHB_MAC1,       "mac1",         "ahb_div",
+               HW_AHBCLKCTRL1, 16 },
+};
+
+static const char __initdata *main_mux_p[] =   { NULL, NULL };
+static const char __initdata *i2s0_mux_p[] =   { NULL, NULL, "i2s0m_div"};
+static const char __initdata *i2s1_mux_p[] =   { NULL, NULL, "i2s1m_div"};
+static const char __initdata *clkout_mux_p[] = { NULL, NULL, "rtc"};
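+/* Maps parent index to the mux register field value: indices 0, 1, 2 select
+ * field values 0, 1, 3; field value 2 is unused. */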
+static u32 three_mux_table[] = {0, 1, 3};
+
+static struct asm9260_mux_clock asm9260_mux_clks[] __initdata = {
+       { 1, three_mux_table, "main_mux",       main_mux_p,
+               ARRAY_SIZE(main_mux_p), HW_MAINCLKSEL, },
+       { 1, three_mux_table, "uart_mux",       main_mux_p,
+               ARRAY_SIZE(main_mux_p), HW_UARTCLKSEL, },
+       { 1, three_mux_table, "wdt_mux",        main_mux_p,
+               ARRAY_SIZE(main_mux_p), HW_WDTCLKSEL, },
+       { 3, three_mux_table, "i2s0_mux",       i2s0_mux_p,
+               ARRAY_SIZE(i2s0_mux_p), HW_I2S0CLKSEL, },
+       { 3, three_mux_table, "i2s1_mux",       i2s1_mux_p,
+               ARRAY_SIZE(i2s1_mux_p), HW_I2S1CLKSEL, },
+       { 3, three_mux_table, "clkout_mux",     clkout_mux_p,
+               ARRAY_SIZE(clkout_mux_p), HW_CLKOUTCLKSEL, },
+};
+
+static void __init asm9260_acc_init(struct device_node *np)
+{
+       struct clk *clk;
+       const char *ref_clk, *pll_clk = "pll";
+       u32 rate;
+       int n;
+       u32 accuracy = 0;
+
+       base = of_io_request_and_map(np, 0, np->name);
+       if (IS_ERR(base))
+               panic("%s: unable to map resource", np->name);
+
+       /* register pll */
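+       /* The low 16 bits of SYSPLLCTRL hold the PLL output rate in MHz. */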
+       rate = (ioread32(base + HW_SYSPLLCTRL) & 0xffff) * 1000000;
+
+       ref_clk = of_clk_get_parent_name(np, 0);
+       accuracy = clk_get_accuracy(__clk_lookup(ref_clk));
+       clk = clk_register_fixed_rate_with_accuracy(NULL, pll_clk,
+                       ref_clk, 0, rate, accuracy);
+
+       if (IS_ERR(clk))
+               panic("%s: can't register PLL. Check DT!", np->name);
+
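+       /* Fill in the NULL placeholders in the mux parent tables: slot 0 is
+        * the DT reference clock, slot 1 the PLL registered above. */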
+       for (n = 0; n < ARRAY_SIZE(asm9260_mux_clks); n++) {
+               const struct asm9260_mux_clock *mc = &asm9260_mux_clks[n];
+
+               mc->parent_names[0] = ref_clk;
+               mc->parent_names[1] = pll_clk;
+               clk = clk_register_mux_table(NULL, mc->name, mc->parent_names,
+                               mc->num_parents, mc->flags, base + mc->offset,
+                               0, mc->mask, 0, mc->table, &asm9260_clk_lock);
+       }
+
+       /* clock mux gate cells */
+       for (n = 0; n < ARRAY_SIZE(asm9260_mux_gates); n++) {
+               const struct asm9260_gate_data *gd = &asm9260_mux_gates[n];
+
+               clk = clk_register_gate(NULL, gd->name,
+                       gd->parent_name, gd->flags | CLK_SET_RATE_PARENT,
+                       base + gd->reg, gd->bit_idx, 0, &asm9260_clk_lock);
+       }
+
+       /* clock div cells */
+       for (n = 0; n < ARRAY_SIZE(asm9260_div_clks); n++) {
+               const struct asm9260_div_clk *dc = &asm9260_div_clks[n];
+
+               clks[dc->idx] = clk_register_divider(NULL, dc->name,
+                               dc->parent_name, CLK_SET_RATE_PARENT,
+                               base + dc->reg, 0, 8, CLK_DIVIDER_ONE_BASED,
+                               &asm9260_clk_lock);
+       }
+
+       /* clock ahb gate cells */
+       for (n = 0; n < ARRAY_SIZE(asm9260_ahb_gates); n++) {
+               const struct asm9260_gate_data *gd = &asm9260_ahb_gates[n];
+
+               clks[gd->idx] = clk_register_gate(NULL, gd->name,
+                               gd->parent_name, gd->flags, base + gd->reg,
+                               gd->bit_idx, 0, &asm9260_clk_lock);
+       }
+
+       /* check for errors on leaf clocks */
+       for (n = 0; n < MAX_CLKS; n++) {
+               if (!IS_ERR(clks[n]))
+                       continue;
+
+               pr_err("%s: Unable to register leaf clock %d\n",
+                               np->full_name, n);
+               goto fail;
+       }
+
+       /* register clk-provider */
+       clk_data.clks = clks;
+       clk_data.clk_num = MAX_CLKS;
+       of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
+       return;
+fail:
+       iounmap(base);
+}
+CLK_OF_DECLARE(asm9260_acc, "alphascale,asm9260-clock-controller",
+               asm9260_acc_init);
diff --git a/drivers/clk/clk-cdce706.c b/drivers/clk/clk-cdce706.c
new file mode 100644 (file)
index 0000000..c386ad2
--- /dev/null
@@ -0,0 +1,700 @@
+/*
+ * TI CDCE706 programmable 3-PLL clock synthesizer driver
+ *
+ * Copyright (c) 2014 Cadence Design Systems Inc.
+ *
+ * Reference: http://www.ti.com/lit/ds/symlink/cdce706.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/rational.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+
+#define CDCE706_CLKIN_CLOCK            10
+#define CDCE706_CLKIN_SOURCE           11
+#define CDCE706_PLL_M_LOW(pll)         (1 + 3 * (pll))
+#define CDCE706_PLL_N_LOW(pll)         (2 + 3 * (pll))
+#define CDCE706_PLL_HI(pll)            (3 + 3 * (pll))
+#define CDCE706_PLL_MUX                        3
+#define CDCE706_PLL_FVCO               6
+#define CDCE706_DIVIDER(div)           (13 + (div))
+#define CDCE706_CLKOUT(out)            (19 + (out))
+
+#define CDCE706_CLKIN_CLOCK_MASK       0x10
+#define CDCE706_CLKIN_SOURCE_SHIFT     6
+#define CDCE706_CLKIN_SOURCE_MASK      0xc0
+#define CDCE706_CLKIN_SOURCE_LVCMOS    0x40
+
+#define CDCE706_PLL_MUX_MASK(pll)      (0x80 >> (pll))
+#define CDCE706_PLL_LOW_M_MASK         0xff
+#define CDCE706_PLL_LOW_N_MASK         0xff
+#define CDCE706_PLL_HI_M_MASK          0x1
+#define CDCE706_PLL_HI_N_MASK          0x1e
+#define CDCE706_PLL_HI_N_SHIFT         1
+#define CDCE706_PLL_M_MAX              0x1ff
+#define CDCE706_PLL_N_MAX              0xfff
+#define CDCE706_PLL_FVCO_MASK(pll)     (0x80 >> (pll))
+#define CDCE706_PLL_FREQ_MIN            80000000
+#define CDCE706_PLL_FREQ_MAX           300000000
+#define CDCE706_PLL_FREQ_HI            180000000
+
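+/*
+ * Divider-to-PLL routing fields: dividers 0 and 1 each occupy bits 7:5 of
+ * registers 9 and 10, while dividers 2/3 and 4/5 share registers 11 and 12
+ * as two 3-bit fields (bits 2:0 and 5:3); the macros below encode this layout.
+ */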
+#define CDCE706_DIVIDER_PLL(div)       (9 + (div) - ((div) > 2) - ((div) > 4))
+#define CDCE706_DIVIDER_PLL_SHIFT(div) ((div) < 2 ? 5 : 3 * ((div) & 1))
+#define CDCE706_DIVIDER_PLL_MASK(div)  (0x7 << CDCE706_DIVIDER_PLL_SHIFT(div))
+#define CDCE706_DIVIDER_DIVIDER_MASK   0x7f
+#define CDCE706_DIVIDER_DIVIDER_MAX    0x7f
+
+#define CDCE706_CLKOUT_DIVIDER_MASK    0x7
+#define CDCE706_CLKOUT_ENABLE_MASK     0x8
+
+static struct regmap_config cdce706_regmap_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .val_format_endian = REGMAP_ENDIAN_NATIVE,
+};
+
+#define to_hw_data(phw) (container_of((phw), struct cdce706_hw_data, hw))
+
+struct cdce706_hw_data {
+       struct cdce706_dev_data *dev_data;
+       unsigned idx;
+       unsigned parent;
+       struct clk *clk;
+       struct clk_hw hw;
+       unsigned div;
+       unsigned mul;
+       unsigned mux;
+};
+
+struct cdce706_dev_data {
+       struct i2c_client *client;
+       struct regmap *regmap;
+       struct clk_onecell_data onecell;
+       struct clk *clks[6];
+       struct clk *clkin_clk[2];
+       const char *clkin_name[2];
+       struct cdce706_hw_data clkin[1];
+       struct cdce706_hw_data pll[3];
+       struct cdce706_hw_data divider[6];
+       struct cdce706_hw_data clkout[6];
+};
+
+static const char * const cdce706_source_name[] = {
+       "clk_in0", "clk_in1",
+};
+
+static const char *cdce706_clkin_name[] = {
+       "clk_in",
+};
+
+static const char * const cdce706_pll_name[] = {
+       "pll1", "pll2", "pll3",
+};
+
+static const char *cdce706_divider_parent_name[] = {
+       "clk_in", "pll1", "pll2", "pll2", "pll3",
+};
+
+static const char *cdce706_divider_name[] = {
+       "p0", "p1", "p2", "p3", "p4", "p5",
+};
+
+static const char * const cdce706_clkout_name[] = {
+       "clk_out0", "clk_out1", "clk_out2", "clk_out3", "clk_out4", "clk_out5",
+};
+
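+/*
+ * Register accesses set bit 7 of the register address; on the CDCE706 this
+ * selects byte (rather than block) transfers (per the datasheet referenced
+ * above).
+ */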
+static int cdce706_reg_read(struct cdce706_dev_data *dev_data, unsigned reg,
+                           unsigned *val)
+{
+       int rc = regmap_read(dev_data->regmap, reg | 0x80, val);
+
+       if (rc < 0)
+               dev_err(&dev_data->client->dev, "error reading reg %u", reg);
+       return rc;
+}
+
+static int cdce706_reg_write(struct cdce706_dev_data *dev_data, unsigned reg,
+                            unsigned val)
+{
+       int rc = regmap_write(dev_data->regmap, reg | 0x80, val);
+
+       if (rc < 0)
+               dev_err(&dev_data->client->dev, "error writing reg %u", reg);
+       return rc;
+}
+
+static int cdce706_reg_update(struct cdce706_dev_data *dev_data, unsigned reg,
+                             unsigned mask, unsigned val)
+{
+       int rc = regmap_update_bits(dev_data->regmap, reg | 0x80, mask, val);
+
+       if (rc < 0)
+               dev_err(&dev_data->client->dev, "error updating reg %u", reg);
+       return rc;
+}
+
+static int cdce706_clkin_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       hwd->parent = index;
+       return 0;
+}
+
+static u8 cdce706_clkin_get_parent(struct clk_hw *hw)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       return hwd->parent;
+}
+
+static const struct clk_ops cdce706_clkin_ops = {
+       .set_parent = cdce706_clkin_set_parent,
+       .get_parent = cdce706_clkin_get_parent,
+};
+
+static unsigned long cdce706_pll_recalc_rate(struct clk_hw *hw,
+                                            unsigned long parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, pll: %d, mux: %d, mul: %u, div: %u\n",
+               __func__, hwd->idx, hwd->mux, hwd->mul, hwd->div);
+
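+       /*
+        * With the PLL mux bit clear the output is parent * N / M; with it
+        * set the multiplier stage is bypassed and only the divider applies.
+        */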
+       if (!hwd->mux) {
+               if (hwd->div && hwd->mul) {
+                       u64 res = (u64)parent_rate * hwd->mul;
+
+                       do_div(res, hwd->div);
+                       return res;
+               }
+       } else {
+               if (hwd->div)
+                       return parent_rate / hwd->div;
+       }
+       return 0;
+}
+
+static long cdce706_pll_round_rate(struct clk_hw *hw, unsigned long rate,
+                                  unsigned long *parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+       unsigned long mul, div;
+       u64 res;
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, rate: %lu, parent_rate: %lu\n",
+               __func__, rate, *parent_rate);
+
+       rational_best_approximation(rate, *parent_rate,
+                                   CDCE706_PLL_N_MAX, CDCE706_PLL_M_MAX,
+                                   &mul, &div);
+       hwd->mul = mul;
+       hwd->div = div;
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, pll: %d, mul: %lu, div: %lu\n",
+               __func__, hwd->idx, mul, div);
+
+       res = (u64)*parent_rate * hwd->mul;
+       do_div(res, hwd->div);
+       return res;
+}
+
+static int cdce706_pll_set_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+       unsigned long mul = hwd->mul, div = hwd->div;
+       int err;
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, pll: %d, mul: %lu, div: %lu\n",
+               __func__, hwd->idx, mul, div);
+
+       err = cdce706_reg_update(hwd->dev_data,
+                                CDCE706_PLL_HI(hwd->idx),
+                                CDCE706_PLL_HI_M_MASK | CDCE706_PLL_HI_N_MASK,
+                                ((div >> 8) & CDCE706_PLL_HI_M_MASK) |
+                                ((mul >> (8 - CDCE706_PLL_HI_N_SHIFT)) &
+                                 CDCE706_PLL_HI_N_MASK));
+       if (err < 0)
+               return err;
+
+       err = cdce706_reg_write(hwd->dev_data,
+                               CDCE706_PLL_M_LOW(hwd->idx),
+                               div & CDCE706_PLL_LOW_M_MASK);
+       if (err < 0)
+               return err;
+
+       err = cdce706_reg_write(hwd->dev_data,
+                               CDCE706_PLL_N_LOW(hwd->idx),
+                               mul & CDCE706_PLL_LOW_N_MASK);
+       if (err < 0)
+               return err;
+
+       err = cdce706_reg_update(hwd->dev_data,
+                                CDCE706_PLL_FVCO,
+                                CDCE706_PLL_FVCO_MASK(hwd->idx),
+                                rate > CDCE706_PLL_FREQ_HI ?
+                                CDCE706_PLL_FVCO_MASK(hwd->idx) : 0);
+       return err;
+}
+
+static const struct clk_ops cdce706_pll_ops = {
+       .recalc_rate = cdce706_pll_recalc_rate,
+       .round_rate = cdce706_pll_round_rate,
+       .set_rate = cdce706_pll_set_rate,
+};
+
+static int cdce706_divider_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       if (hwd->parent == index)
+               return 0;
+       hwd->parent = index;
+       return cdce706_reg_update(hwd->dev_data,
+                                 CDCE706_DIVIDER_PLL(hwd->idx),
+                                 CDCE706_DIVIDER_PLL_MASK(hwd->idx),
+                                 index << CDCE706_DIVIDER_PLL_SHIFT(hwd->idx));
+}
+
+static u8 cdce706_divider_get_parent(struct clk_hw *hw)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       return hwd->parent;
+}
+
+static unsigned long cdce706_divider_recalc_rate(struct clk_hw *hw,
+                                                unsigned long parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, divider: %d, div: %u\n",
+               __func__, hwd->idx, hwd->div);
+       if (hwd->div)
+               return parent_rate / hwd->div;
+       return 0;
+}
+
+static long cdce706_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long *parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+       struct cdce706_dev_data *cdce = hwd->dev_data;
+       unsigned long mul, div;
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, rate: %lu, parent_rate: %lu\n",
+               __func__, rate, *parent_rate);
+
+       rational_best_approximation(rate, *parent_rate,
+                                   1, CDCE706_DIVIDER_DIVIDER_MAX,
+                                   &mul, &div);
+       if (!mul)
+               div = CDCE706_DIVIDER_DIVIDER_MAX;
+
+       if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+               unsigned long best_diff = rate;
+               unsigned long best_div = 0;
+               struct clk *gp_clk = cdce->clkin_clk[cdce->clkin[0].parent];
+               unsigned long gp_rate = gp_clk ? clk_get_rate(gp_clk) : 0;
+
+               for (div = CDCE706_PLL_FREQ_MIN / rate; best_diff &&
+                    div <= CDCE706_PLL_FREQ_MAX / rate; ++div) {
+                       unsigned long n, m;
+                       unsigned long diff;
+                       unsigned long div_rate;
+                       u64 div_rate64;
+
+                       if (rate * div < CDCE706_PLL_FREQ_MIN)
+                               continue;
+
+                       rational_best_approximation(rate * div, gp_rate,
+                                                   CDCE706_PLL_N_MAX,
+                                                   CDCE706_PLL_M_MAX,
+                                                   &n, &m);
+                       div_rate64 = (u64)gp_rate * n;
+                       do_div(div_rate64, m);
+                       do_div(div_rate64, div);
+                       div_rate = div_rate64;
+                       diff = max(div_rate, rate) - min(div_rate, rate);
+
+                       if (diff < best_diff) {
+                               best_diff = diff;
+                               best_div = div;
+                               dev_dbg(&hwd->dev_data->client->dev,
+                                       "%s, %lu * %lu / %lu / %lu = %lu\n",
+                                       __func__, gp_rate, n, m, div, div_rate);
+                       }
+               }
+
+               div = best_div;
+
+               dev_dbg(&hwd->dev_data->client->dev,
+                       "%s, altering parent rate: %lu -> %lu\n",
+                       __func__, *parent_rate, rate * div);
+               *parent_rate = rate * div;
+       }
+       hwd->div = div;
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, divider: %d, div: %lu\n",
+               __func__, hwd->idx, div);
+
+       return *parent_rate / div;
+}
+
+static int cdce706_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+                                   unsigned long parent_rate)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       dev_dbg(&hwd->dev_data->client->dev,
+               "%s, divider: %d, div: %u\n",
+               __func__, hwd->idx, hwd->div);
+
+       return cdce706_reg_update(hwd->dev_data,
+                                 CDCE706_DIVIDER(hwd->idx),
+                                 CDCE706_DIVIDER_DIVIDER_MASK,
+                                 hwd->div);
+}
+
+static const struct clk_ops cdce706_divider_ops = {
+       .set_parent = cdce706_divider_set_parent,
+       .get_parent = cdce706_divider_get_parent,
+       .recalc_rate = cdce706_divider_recalc_rate,
+       .round_rate = cdce706_divider_round_rate,
+       .set_rate = cdce706_divider_set_rate,
+};
+
+static int cdce706_clkout_prepare(struct clk_hw *hw)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       return cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx),
+                                 CDCE706_CLKOUT_ENABLE_MASK,
+                                 CDCE706_CLKOUT_ENABLE_MASK);
+}
+
+static void cdce706_clkout_unprepare(struct clk_hw *hw)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       cdce706_reg_update(hwd->dev_data, CDCE706_CLKOUT(hwd->idx),
+                          CDCE706_CLKOUT_ENABLE_MASK, 0);
+}
+
+static int cdce706_clkout_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       if (hwd->parent == index)
+               return 0;
+       hwd->parent = index;
+       return cdce706_reg_update(hwd->dev_data,
+                                 CDCE706_CLKOUT(hwd->idx),
+                                 CDCE706_CLKOUT_ENABLE_MASK, index);
+}
+
+static u8 cdce706_clkout_get_parent(struct clk_hw *hw)
+{
+       struct cdce706_hw_data *hwd = to_hw_data(hw);
+
+       return hwd->parent;
+}
+
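+/* The output stages have no divider of their own: they pass the selected
+ * divider's rate straight through and forward rate requests to the parent. */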
+static unsigned long cdce706_clkout_recalc_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       return parent_rate;
+}
+
+static long cdce706_clkout_round_rate(struct clk_hw *hw, unsigned long rate,
+                                     unsigned long *parent_rate)
+{
+       *parent_rate = rate;
+       return rate;
+}
+
+static int cdce706_clkout_set_rate(struct clk_hw *hw, unsigned long rate,
+                                  unsigned long parent_rate)
+{
+       return 0;
+}
+
+static const struct clk_ops cdce706_clkout_ops = {
+       .prepare = cdce706_clkout_prepare,
+       .unprepare = cdce706_clkout_unprepare,
+       .set_parent = cdce706_clkout_set_parent,
+       .get_parent = cdce706_clkout_get_parent,
+       .recalc_rate = cdce706_clkout_recalc_rate,
+       .round_rate = cdce706_clkout_round_rate,
+       .set_rate = cdce706_clkout_set_rate,
+};
+
+static int cdce706_register_hw(struct cdce706_dev_data *cdce,
+                              struct cdce706_hw_data *hw, unsigned num_hw,
+                              const char * const *clk_names,
+                              struct clk_init_data *init)
+{
+       unsigned i;
+
+       for (i = 0; i < num_hw; ++i, ++hw) {
+               init->name = clk_names[i];
+               hw->dev_data = cdce;
+               hw->idx = i;
+               hw->hw.init = init;
+               hw->clk = devm_clk_register(&cdce->client->dev,
+                                           &hw->hw);
+               if (IS_ERR(hw->clk)) {
+                       dev_err(&cdce->client->dev, "Failed to register %s\n",
+                               clk_names[i]);
+                       return PTR_ERR(hw->clk);
+               }
+       }
+       return 0;
+}
+
+static int cdce706_register_clkin(struct cdce706_dev_data *cdce)
+{
+       struct clk_init_data init = {
+               .ops = &cdce706_clkin_ops,
+               .parent_names = cdce->clkin_name,
+               .num_parents = ARRAY_SIZE(cdce->clkin_name),
+       };
+       unsigned i;
+       int ret;
+       unsigned clock, source;
+
+       for (i = 0; i < ARRAY_SIZE(cdce->clkin_name); ++i) {
+               struct clk *parent = devm_clk_get(&cdce->client->dev,
+                                                 cdce706_source_name[i]);
+
+               if (IS_ERR(parent)) {
+                       cdce->clkin_name[i] = cdce706_source_name[i];
+               } else {
+                       cdce->clkin_name[i] = __clk_get_name(parent);
+                       cdce->clkin_clk[i] = parent;
+               }
+       }
+
+       ret = cdce706_reg_read(cdce, CDCE706_CLKIN_SOURCE, &source);
+       if (ret < 0)
+               return ret;
+       if ((source & CDCE706_CLKIN_SOURCE_MASK) ==
+           CDCE706_CLKIN_SOURCE_LVCMOS) {
+               ret = cdce706_reg_read(cdce, CDCE706_CLKIN_CLOCK, &clock);
+               if (ret < 0)
+                       return ret;
+               cdce->clkin[0].parent = !!(clock & CDCE706_CLKIN_CLOCK_MASK);
+       }
+
+       ret = cdce706_register_hw(cdce, cdce->clkin,
+                                 ARRAY_SIZE(cdce->clkin),
+                                 cdce706_clkin_name, &init);
+       return ret;
+}
+
+static int cdce706_register_plls(struct cdce706_dev_data *cdce)
+{
+       struct clk_init_data init = {
+               .ops = &cdce706_pll_ops,
+               .parent_names = cdce706_clkin_name,
+               .num_parents = ARRAY_SIZE(cdce706_clkin_name),
+       };
+       unsigned i;
+       int ret;
+       unsigned mux;
+
+       ret = cdce706_reg_read(cdce, CDCE706_PLL_MUX, &mux);
+       if (ret < 0)
+               return ret;
+
+       for (i = 0; i < ARRAY_SIZE(cdce->pll); ++i) {
+               unsigned m, n, v;
+
+               ret = cdce706_reg_read(cdce, CDCE706_PLL_M_LOW(i), &m);
+               if (ret < 0)
+                       return ret;
+               ret = cdce706_reg_read(cdce, CDCE706_PLL_N_LOW(i), &n);
+               if (ret < 0)
+                       return ret;
+               ret = cdce706_reg_read(cdce, CDCE706_PLL_HI(i), &v);
+               if (ret < 0)
+                       return ret;
+               cdce->pll[i].div = m | ((v & CDCE706_PLL_HI_M_MASK) << 8);
+               cdce->pll[i].mul = n | ((v & CDCE706_PLL_HI_N_MASK) <<
+                                       (8 - CDCE706_PLL_HI_N_SHIFT));
+               cdce->pll[i].mux = mux & CDCE706_PLL_MUX_MASK(i);
+               dev_dbg(&cdce->client->dev,
+                       "%s: i: %u, div: %u, mul: %u, mux: %d\n", __func__, i,
+                       cdce->pll[i].div, cdce->pll[i].mul, cdce->pll[i].mux);
+       }
+
+       ret = cdce706_register_hw(cdce, cdce->pll,
+                                 ARRAY_SIZE(cdce->pll),
+                                 cdce706_pll_name, &init);
+       return ret;
+}
+
+static int cdce706_register_dividers(struct cdce706_dev_data *cdce)
+{
+       struct clk_init_data init = {
+               .ops = &cdce706_divider_ops,
+               .parent_names = cdce706_divider_parent_name,
+               .num_parents = ARRAY_SIZE(cdce706_divider_parent_name),
+               .flags = CLK_SET_RATE_PARENT,
+       };
+       unsigned i;
+       int ret;
+
+       for (i = 0; i < ARRAY_SIZE(cdce->divider); ++i) {
+               unsigned val;
+
+               ret = cdce706_reg_read(cdce, CDCE706_DIVIDER_PLL(i), &val);
+               if (ret < 0)
+                       return ret;
+               cdce->divider[i].parent =
+                       (val & CDCE706_DIVIDER_PLL_MASK(i)) >>
+                       CDCE706_DIVIDER_PLL_SHIFT(i);
+
+               ret = cdce706_reg_read(cdce, CDCE706_DIVIDER(i), &val);
+               if (ret < 0)
+                       return ret;
+               cdce->divider[i].div = val & CDCE706_DIVIDER_DIVIDER_MASK;
+               dev_dbg(&cdce->client->dev,
+                       "%s: i: %u, parent: %u, div: %u\n", __func__, i,
+                       cdce->divider[i].parent, cdce->divider[i].div);
+       }
+
+       ret = cdce706_register_hw(cdce, cdce->divider,
+                                 ARRAY_SIZE(cdce->divider),
+                                 cdce706_divider_name, &init);
+       return ret;
+}
+
+static int cdce706_register_clkouts(struct cdce706_dev_data *cdce)
+{
+       struct clk_init_data init = {
+               .ops = &cdce706_clkout_ops,
+               .parent_names = cdce706_divider_name,
+               .num_parents = ARRAY_SIZE(cdce706_divider_name),
+               .flags = CLK_SET_RATE_PARENT,
+       };
+       unsigned i;
+       int ret;
+
+       for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i) {
+               unsigned val;
+
+               ret = cdce706_reg_read(cdce, CDCE706_CLKOUT(i), &val);
+               if (ret < 0)
+                       return ret;
+               cdce->clkout[i].parent = val & CDCE706_CLKOUT_DIVIDER_MASK;
+               dev_dbg(&cdce->client->dev,
+                       "%s: i: %u, parent: %u\n", __func__, i,
+                       cdce->clkout[i].parent);
+       }
+
+       ret = cdce706_register_hw(cdce, cdce->clkout,
+                                 ARRAY_SIZE(cdce->clkout),
+                                 cdce706_clkout_name, &init);
+       for (i = 0; i < ARRAY_SIZE(cdce->clkout); ++i)
+               cdce->clks[i] = cdce->clkout[i].clk;
+
+       return ret;
+}
+
+static int cdce706_probe(struct i2c_client *client,
+                        const struct i2c_device_id *id)
+{
+       struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+       struct cdce706_dev_data *cdce;
+       int ret;
+
+       if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+               return -EIO;
+
+       cdce = devm_kzalloc(&client->dev, sizeof(*cdce), GFP_KERNEL);
+       if (!cdce)
+               return -ENOMEM;
+
+       cdce->client = client;
+       cdce->regmap = devm_regmap_init_i2c(client, &cdce706_regmap_config);
+       if (IS_ERR(cdce->regmap)) {
+               dev_err(&client->dev, "Failed to initialize regmap\n");
+               return -EINVAL;
+       }
+
+       i2c_set_clientdata(client, cdce);
+
+       ret = cdce706_register_clkin(cdce);
+       if (ret < 0)
+               return ret;
+       ret = cdce706_register_plls(cdce);
+       if (ret < 0)
+               return ret;
+       ret = cdce706_register_dividers(cdce);
+       if (ret < 0)
+               return ret;
+       ret = cdce706_register_clkouts(cdce);
+       if (ret < 0)
+               return ret;
+       cdce->onecell.clks = cdce->clks;
+       cdce->onecell.clk_num = ARRAY_SIZE(cdce->clks);
+       ret = of_clk_add_provider(client->dev.of_node, of_clk_src_onecell_get,
+                                 &cdce->onecell);
+
+       return ret;
+}
+
+static int cdce706_remove(struct i2c_client *client)
+{
+       of_clk_del_provider(client->dev.of_node);
+       return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id cdce706_dt_match[] = {
+       { .compatible = "ti,cdce706" },
+       { },
+};
+MODULE_DEVICE_TABLE(of, cdce706_dt_match);
+#endif
+
+static const struct i2c_device_id cdce706_id[] = {
+       { "cdce706", 0 },
+       { }
+};
+MODULE_DEVICE_TABLE(i2c, cdce706_id);
+
+static struct i2c_driver cdce706_i2c_driver = {
+       .driver = {
+               .name   = "cdce706",
+               .of_match_table = of_match_ptr(cdce706_dt_match),
+       },
+       .probe          = cdce706_probe,
+       .remove         = cdce706_remove,
+       .id_table       = cdce706_id,
+};
+module_i2c_driver(cdce706_i2c_driver);
+
+MODULE_AUTHOR("Max Filippov <jcmvbkbc@gmail.com>");
+MODULE_DESCRIPTION("TI CDCE 706 clock synthesizer driver");
+MODULE_LICENSE("GPL");
index 4386697236a78dc23aea66d0c4792873d558f71e..956b7e54fa1c5f4f3583ac642c3f3f2ae255fee2 100644 (file)
@@ -27,7 +27,7 @@ static u8 clk_composite_get_parent(struct clk_hw *hw)
        const struct clk_ops *mux_ops = composite->mux_ops;
        struct clk_hw *mux_hw = composite->mux_hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return mux_ops->get_parent(mux_hw);
 }
@@ -38,7 +38,7 @@ static int clk_composite_set_parent(struct clk_hw *hw, u8 index)
        const struct clk_ops *mux_ops = composite->mux_ops;
        struct clk_hw *mux_hw = composite->mux_hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return mux_ops->set_parent(mux_hw, index);
 }
@@ -50,12 +50,14 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
        const struct clk_ops *rate_ops = composite->rate_ops;
        struct clk_hw *rate_hw = composite->rate_hw;
 
-       rate_hw->clk = hw->clk;
+       __clk_hw_set_clk(rate_hw, hw);
 
        return rate_ops->recalc_rate(rate_hw, parent_rate);
 }
 
 static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_p)
 {
@@ -72,8 +74,10 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
        int i;
 
        if (rate_hw && rate_ops && rate_ops->determine_rate) {
-               rate_hw->clk = hw->clk;
-               return rate_ops->determine_rate(rate_hw, rate, best_parent_rate,
+               __clk_hw_set_clk(rate_hw, hw);
+               return rate_ops->determine_rate(rate_hw, rate, min_rate,
+                                               max_rate,
+                                               best_parent_rate,
                                                best_parent_p);
        } else if (rate_hw && rate_ops && rate_ops->round_rate &&
                   mux_hw && mux_ops && mux_ops->set_parent) {
@@ -116,8 +120,9 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
 
                return best_rate;
        } else if (mux_hw && mux_ops && mux_ops->determine_rate) {
-               mux_hw->clk = hw->clk;
-               return mux_ops->determine_rate(mux_hw, rate, best_parent_rate,
+               __clk_hw_set_clk(mux_hw, hw);
+               return mux_ops->determine_rate(mux_hw, rate, min_rate,
+                                              max_rate, best_parent_rate,
                                               best_parent_p);
        } else {
                pr_err("clk: clk_composite_determine_rate function called, but no mux or rate callback set!\n");
@@ -132,7 +137,7 @@ static long clk_composite_round_rate(struct clk_hw *hw, unsigned long rate,
        const struct clk_ops *rate_ops = composite->rate_ops;
        struct clk_hw *rate_hw = composite->rate_hw;
 
-       rate_hw->clk = hw->clk;
+       __clk_hw_set_clk(rate_hw, hw);
 
        return rate_ops->round_rate(rate_hw, rate, prate);
 }
@@ -144,7 +149,7 @@ static int clk_composite_set_rate(struct clk_hw *hw, unsigned long rate,
        const struct clk_ops *rate_ops = composite->rate_ops;
        struct clk_hw *rate_hw = composite->rate_hw;
 
-       rate_hw->clk = hw->clk;
+       __clk_hw_set_clk(rate_hw, hw);
 
        return rate_ops->set_rate(rate_hw, rate, parent_rate);
 }
@@ -155,7 +160,7 @@ static int clk_composite_is_enabled(struct clk_hw *hw)
        const struct clk_ops *gate_ops = composite->gate_ops;
        struct clk_hw *gate_hw = composite->gate_hw;
 
-       gate_hw->clk = hw->clk;
+       __clk_hw_set_clk(gate_hw, hw);
 
        return gate_ops->is_enabled(gate_hw);
 }
@@ -166,7 +171,7 @@ static int clk_composite_enable(struct clk_hw *hw)
        const struct clk_ops *gate_ops = composite->gate_ops;
        struct clk_hw *gate_hw = composite->gate_hw;
 
-       gate_hw->clk = hw->clk;
+       __clk_hw_set_clk(gate_hw, hw);
 
        return gate_ops->enable(gate_hw);
 }
@@ -177,7 +182,7 @@ static void clk_composite_disable(struct clk_hw *hw)
        const struct clk_ops *gate_ops = composite->gate_ops;
        struct clk_hw *gate_hw = composite->gate_hw;
 
-       gate_hw->clk = hw->clk;
+       __clk_hw_set_clk(gate_hw, hw);
 
        gate_ops->disable(gate_hw);
 }
index c0a842b335c520c6c28f08308a1b62a743038dd3..db7f8bce7467a2abfd37f6ccb8e22c784a6bce28 100644 (file)
@@ -30,7 +30,7 @@
 
 #define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
 
-#define div_mask(d)    ((1 << ((d)->width)) - 1)
+#define div_mask(width)        ((1 << (width)) - 1)
 
 static unsigned int _get_table_maxdiv(const struct clk_div_table *table)
 {
@@ -54,15 +54,16 @@ static unsigned int _get_table_mindiv(const struct clk_div_table *table)
        return mindiv;
 }
 
-static unsigned int _get_maxdiv(struct clk_divider *divider)
+static unsigned int _get_maxdiv(const struct clk_div_table *table, u8 width,
+                               unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_ONE_BASED)
-               return div_mask(divider);
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
-               return 1 << div_mask(divider);
-       if (divider->table)
-               return _get_table_maxdiv(divider->table);
-       return div_mask(divider) + 1;
+       if (flags & CLK_DIVIDER_ONE_BASED)
+               return div_mask(width);
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
+               return 1 << div_mask(width);
+       if (table)
+               return _get_table_maxdiv(table);
+       return div_mask(width) + 1;
 }
 
 static unsigned int _get_table_div(const struct clk_div_table *table,
@@ -76,14 +77,15 @@ static unsigned int _get_table_div(const struct clk_div_table *table,
        return 0;
 }
 
-static unsigned int _get_div(struct clk_divider *divider, unsigned int val)
+static unsigned int _get_div(const struct clk_div_table *table,
+                            unsigned int val, unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_ONE_BASED)
+       if (flags & CLK_DIVIDER_ONE_BASED)
                return val;
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
                return 1 << val;
-       if (divider->table)
-               return _get_table_div(divider->table, val);
+       if (table)
+               return _get_table_div(table, val);
        return val + 1;
 }
 
@@ -98,29 +100,28 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
        return 0;
 }
 
-static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
+static unsigned int _get_val(const struct clk_div_table *table,
+                            unsigned int div, unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_ONE_BASED)
+       if (flags & CLK_DIVIDER_ONE_BASED)
                return div;
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
                return __ffs(div);
-       if (divider->table)
-               return  _get_table_val(divider->table, div);
+       if (table)
+               return  _get_table_val(table, div);
        return div - 1;
 }
 
-static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
-               unsigned long parent_rate)
+unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+                                 unsigned int val,
+                                 const struct clk_div_table *table,
+                                 unsigned long flags)
 {
-       struct clk_divider *divider = to_clk_divider(hw);
-       unsigned int div, val;
+       unsigned int div;
 
-       val = clk_readl(divider->reg) >> divider->shift;
-       val &= div_mask(divider);
-
-       div = _get_div(divider, val);
+       div = _get_div(table, val, flags);
        if (!div) {
-               WARN(!(divider->flags & CLK_DIVIDER_ALLOW_ZERO),
+               WARN(!(flags & CLK_DIVIDER_ALLOW_ZERO),
                        "%s: Zero divisor and CLK_DIVIDER_ALLOW_ZERO not set\n",
                        __clk_get_name(hw->clk));
                return parent_rate;
@@ -128,6 +129,20 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
 
        return DIV_ROUND_UP(parent_rate, div);
 }
+EXPORT_SYMBOL_GPL(divider_recalc_rate);
+
+static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
+               unsigned long parent_rate)
+{
+       struct clk_divider *divider = to_clk_divider(hw);
+       unsigned int val;
+
+       val = clk_readl(divider->reg) >> divider->shift;
+       val &= div_mask(divider->width);
+
+       return divider_recalc_rate(hw, parent_rate, val, divider->table,
+                                  divider->flags);
+}
 
 /*
  * The reverse of DIV_ROUND_UP: The maximum number which
@@ -146,12 +161,13 @@ static bool _is_valid_table_div(const struct clk_div_table *table,
        return false;
 }
 
-static bool _is_valid_div(struct clk_divider *divider, unsigned int div)
+static bool _is_valid_div(const struct clk_div_table *table, unsigned int div,
+                         unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
                return is_power_of_2(div);
-       if (divider->table)
-               return _is_valid_table_div(divider->table, div);
+       if (table)
+               return _is_valid_table_div(table, div);
        return true;
 }
 
@@ -191,71 +207,76 @@ static int _round_down_table(const struct clk_div_table *table, int div)
        return down;
 }
 
-static int _div_round_up(struct clk_divider *divider,
-               unsigned long parent_rate, unsigned long rate)
+static int _div_round_up(const struct clk_div_table *table,
+                        unsigned long parent_rate, unsigned long rate,
+                        unsigned long flags)
 {
        int div = DIV_ROUND_UP(parent_rate, rate);
 
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
                div = __roundup_pow_of_two(div);
-       if (divider->table)
-               div = _round_up_table(divider->table, div);
+       if (table)
+               div = _round_up_table(table, div);
 
        return div;
 }
 
-static int _div_round_closest(struct clk_divider *divider,
-               unsigned long parent_rate, unsigned long rate)
+static int _div_round_closest(const struct clk_div_table *table,
+                             unsigned long parent_rate, unsigned long rate,
+                             unsigned long flags)
 {
        int up, down, div;
 
        up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate);
 
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO) {
+       if (flags & CLK_DIVIDER_POWER_OF_TWO) {
                up = __roundup_pow_of_two(div);
                down = __rounddown_pow_of_two(div);
-       } else if (divider->table) {
-               up = _round_up_table(divider->table, div);
-               down = _round_down_table(divider->table, div);
+       } else if (table) {
+               up = _round_up_table(table, div);
+               down = _round_down_table(table, div);
        }
 
        return (up - div) <= (div - down) ? up : down;
 }
 
-static int _div_round(struct clk_divider *divider, unsigned long parent_rate,
-               unsigned long rate)
+static int _div_round(const struct clk_div_table *table,
+                     unsigned long parent_rate, unsigned long rate,
+                     unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST)
-               return _div_round_closest(divider, parent_rate, rate);
+       if (flags & CLK_DIVIDER_ROUND_CLOSEST)
+               return _div_round_closest(table, parent_rate, rate, flags);
 
-       return _div_round_up(divider, parent_rate, rate);
+       return _div_round_up(table, parent_rate, rate, flags);
 }
 
-static bool _is_best_div(struct clk_divider *divider,
-               unsigned long rate, unsigned long now, unsigned long best)
+static bool _is_best_div(unsigned long rate, unsigned long now,
+                        unsigned long best, unsigned long flags)
 {
-       if (divider->flags & CLK_DIVIDER_ROUND_CLOSEST)
+       if (flags & CLK_DIVIDER_ROUND_CLOSEST)
                return abs(rate - now) < abs(rate - best);
 
        return now <= rate && now > best;
 }
 
-static int _next_div(struct clk_divider *divider, int div)
+static int _next_div(const struct clk_div_table *table, int div,
+                    unsigned long flags)
 {
        div++;
 
-       if (divider->flags & CLK_DIVIDER_POWER_OF_TWO)
+       if (flags & CLK_DIVIDER_POWER_OF_TWO)
                return __roundup_pow_of_two(div);
-       if (divider->table)
-               return _round_up_table(divider->table, div);
+       if (table)
+               return _round_up_table(table, div);
 
        return div;
 }
 
 static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
-               unsigned long *best_parent_rate)
+                              unsigned long *best_parent_rate,
+                              const struct clk_div_table *table, u8 width,
+                              unsigned long flags)
 {
-       struct clk_divider *divider = to_clk_divider(hw);
        int i, bestdiv = 0;
        unsigned long parent_rate, best = 0, now, maxdiv;
        unsigned long parent_rate_saved = *best_parent_rate;
@@ -263,19 +284,11 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
        if (!rate)
                rate = 1;
 
-       /* if read only, just return current value */
-       if (divider->flags & CLK_DIVIDER_READ_ONLY) {
-               bestdiv = readl(divider->reg) >> divider->shift;
-               bestdiv &= div_mask(divider);
-               bestdiv = _get_div(divider, bestdiv);
-               return bestdiv;
-       }
-
-       maxdiv = _get_maxdiv(divider);
+       maxdiv = _get_maxdiv(table, width, flags);
 
        if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
                parent_rate = *best_parent_rate;
-               bestdiv = _div_round(divider, parent_rate, rate);
+               bestdiv = _div_round(table, parent_rate, rate, flags);
                bestdiv = bestdiv == 0 ? 1 : bestdiv;
                bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
                return bestdiv;
@@ -287,8 +300,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
         */
        maxdiv = min(ULONG_MAX / rate, maxdiv);
 
-       for (i = 1; i <= maxdiv; i = _next_div(divider, i)) {
-               if (!_is_valid_div(divider, i))
+       for (i = 1; i <= maxdiv; i = _next_div(table, i, flags)) {
+               if (!_is_valid_div(table, i, flags))
                        continue;
                if (rate * i == parent_rate_saved) {
                        /*
@@ -302,7 +315,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
                parent_rate = __clk_round_rate(__clk_get_parent(hw->clk),
                                MULT_ROUND_UP(rate, i));
                now = DIV_ROUND_UP(parent_rate, i);
-               if (_is_best_div(divider, rate, now, best)) {
+               if (_is_best_div(rate, now, best, flags)) {
                        bestdiv = i;
                        best = now;
                        *best_parent_rate = parent_rate;
@@ -310,48 +323,79 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
        }
 
        if (!bestdiv) {
-               bestdiv = _get_maxdiv(divider);
+               bestdiv = _get_maxdiv(table, width, flags);
                *best_parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), 1);
        }
 
        return bestdiv;
 }
 
-static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
-                               unsigned long *prate)
+long divider_round_rate(struct clk_hw *hw, unsigned long rate,
+                       unsigned long *prate, const struct clk_div_table *table,
+                       u8 width, unsigned long flags)
 {
        int div;
-       div = clk_divider_bestdiv(hw, rate, prate);
+
+       div = clk_divider_bestdiv(hw, rate, prate, table, width, flags);
 
        return DIV_ROUND_UP(*prate, div);
 }
+EXPORT_SYMBOL_GPL(divider_round_rate);
 
-static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
-                               unsigned long parent_rate)
+static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long *prate)
 {
        struct clk_divider *divider = to_clk_divider(hw);
+       int bestdiv;
+
+       /* if read only, just return current value */
+       if (divider->flags & CLK_DIVIDER_READ_ONLY) {
+               bestdiv = readl(divider->reg) >> divider->shift;
+               bestdiv &= div_mask(divider->width);
+               bestdiv = _get_div(divider->table, bestdiv, divider->flags);
+               return bestdiv;
+       }
+
+       return divider_round_rate(hw, rate, prate, divider->table,
+                                 divider->width, divider->flags);
+}
+
+int divider_get_val(unsigned long rate, unsigned long parent_rate,
+                   const struct clk_div_table *table, u8 width,
+                   unsigned long flags)
+{
        unsigned int div, value;
-       unsigned long flags = 0;
-       u32 val;
 
        div = DIV_ROUND_UP(parent_rate, rate);
 
-       if (!_is_valid_div(divider, div))
+       if (!_is_valid_div(table, div, flags))
                return -EINVAL;
 
-       value = _get_val(divider, div);
+       value = _get_val(table, div, flags);
+
+       return min_t(unsigned int, value, div_mask(width));
+}
+EXPORT_SYMBOL_GPL(divider_get_val);
+
+static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long parent_rate)
+{
+       struct clk_divider *divider = to_clk_divider(hw);
+       unsigned int value;
+       unsigned long flags = 0;
+       u32 val;
 
-       if (value > div_mask(divider))
-               value = div_mask(divider);
+       value = divider_get_val(rate, parent_rate, divider->table,
+                               divider->width, divider->flags);
 
        if (divider->lock)
                spin_lock_irqsave(divider->lock, flags);
 
        if (divider->flags & CLK_DIVIDER_HIWORD_MASK) {
-               val = div_mask(divider) << (divider->shift + 16);
+               val = div_mask(divider->width) << (divider->shift + 16);
        } else {
                val = clk_readl(divider->reg);
-               val &= ~(div_mask(divider) << divider->shift);
+               val &= ~(div_mask(divider->width) << divider->shift);
        }
        val |= value << divider->shift;
        clk_writel(val, divider->reg);
@@ -463,3 +507,19 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
                        width, clk_divider_flags, table, lock);
 }
 EXPORT_SYMBOL_GPL(clk_register_divider_table);
+
+void clk_unregister_divider(struct clk *clk)
+{
+       struct clk_divider *div;
+       struct clk_hw *hw;
+
+       hw = __clk_get_hw(clk);
+       if (!hw)
+               return;
+
+       div = to_clk_divider(hw);
+
+       clk_unregister(clk);
+       kfree(div);
+}
+EXPORT_SYMBOL_GPL(clk_unregister_divider);
index 51fd87fb7ba691e8a52e40ddc1ee4524ecd3e781..3f0e4200cb5d4ca4a680c78479ac86ed116766a5 100644 (file)
@@ -128,7 +128,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
        struct clk_init_data init;
 
        if (clk_gate_flags & CLK_GATE_HIWORD_MASK) {
-               if (bit_idx > 16) {
+               if (bit_idx > 15) {
                        pr_err("gate bit exceeds LOWORD field\n");
                        return ERR_PTR(-EINVAL);
                }
@@ -162,3 +162,19 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
        return clk;
 }
 EXPORT_SYMBOL_GPL(clk_register_gate);
+
+void clk_unregister_gate(struct clk *clk)
+{
+       struct clk_gate *gate;
+       struct clk_hw *hw;
+
+       hw = __clk_get_hw(clk);
+       if (!hw)
+               return;
+
+       gate = to_clk_gate(hw);
+
+       clk_unregister(clk);
+       kfree(gate);
+}
+EXPORT_SYMBOL_GPL(clk_unregister_gate);
index 6e1ecf94bf58daa279cb47e42065da9a9db3c581..69a094c3783d8eb2a2c0d3624f3a641f97a5d484 100644 (file)
@@ -177,3 +177,19 @@ struct clk *clk_register_mux(struct device *dev, const char *name,
                                      NULL, lock);
 }
 EXPORT_SYMBOL_GPL(clk_register_mux);
+
+void clk_unregister_mux(struct clk *clk)
+{
+       struct clk_mux *mux;
+       struct clk_hw *hw;
+
+       hw = __clk_get_hw(clk);
+       if (!hw)
+               return;
+
+       mux = to_clk_mux(hw);
+
+       clk_unregister(clk);
+       kfree(mux);
+}
+EXPORT_SYMBOL_GPL(clk_unregister_mux);
diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c
deleted file mode 100644 (file)
index 0a47d6f..0000000
+++ /dev/null
@@ -1,306 +0,0 @@
-/*
- * Copyright 2013 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * clock driver for Freescale PowerPC corenet SoCs.
- */
-#include <linux/clk-provider.h>
-#include <linux/io.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/of_address.h>
-#include <linux/of_platform.h>
-#include <linux/of.h>
-#include <linux/slab.h>
-
-struct cmux_clk {
-       struct clk_hw hw;
-       void __iomem *reg;
-       u32 flags;
-};
-
-#define PLL_KILL                       BIT(31)
-#define        CLKSEL_SHIFT            27
-#define CLKSEL_ADJUST          BIT(0)
-#define to_cmux_clk(p)         container_of(p, struct cmux_clk, hw)
-
-static unsigned int clocks_per_pll;
-
-static int cmux_set_parent(struct clk_hw *hw, u8 idx)
-{
-       struct cmux_clk *clk = to_cmux_clk(hw);
-       u32 clksel;
-
-       clksel = ((idx / clocks_per_pll) << 2) + idx % clocks_per_pll;
-       if (clk->flags & CLKSEL_ADJUST)
-               clksel += 8;
-       clksel = (clksel & 0xf) << CLKSEL_SHIFT;
-       iowrite32be(clksel, clk->reg);
-
-       return 0;
-}
-
-static u8 cmux_get_parent(struct clk_hw *hw)
-{
-       struct cmux_clk *clk = to_cmux_clk(hw);
-       u32 clksel;
-
-       clksel = ioread32be(clk->reg);
-       clksel = (clksel >> CLKSEL_SHIFT) & 0xf;
-       if (clk->flags & CLKSEL_ADJUST)
-               clksel -= 8;
-       clksel = (clksel >> 2) * clocks_per_pll + clksel % 4;
-
-       return clksel;
-}
-
-const struct clk_ops cmux_ops = {
-       .get_parent = cmux_get_parent,
-       .set_parent = cmux_set_parent,
-};
-
-static void __init core_mux_init(struct device_node *np)
-{
-       struct clk *clk;
-       struct clk_init_data init;
-       struct cmux_clk *cmux_clk;
-       struct device_node *node;
-       int rc, count, i;
-       u32     offset;
-       const char *clk_name;
-       const char **parent_names;
-
-       rc = of_property_read_u32(np, "reg", &offset);
-       if (rc) {
-               pr_err("%s: could not get reg property\n", np->name);
-               return;
-       }
-
-       /* get the input clock source count */
-       count = of_property_count_strings(np, "clock-names");
-       if (count < 0) {
-               pr_err("%s: get clock count error\n", np->name);
-               return;
-       }
-       parent_names = kzalloc((sizeof(char *) * count), GFP_KERNEL);
-       if (!parent_names) {
-               pr_err("%s: could not allocate parent_names\n", __func__);
-               return;
-       }
-
-       for (i = 0; i < count; i++)
-               parent_names[i] = of_clk_get_parent_name(np, i);
-
-       cmux_clk = kzalloc(sizeof(struct cmux_clk), GFP_KERNEL);
-       if (!cmux_clk) {
-               pr_err("%s: could not allocate cmux_clk\n", __func__);
-               goto err_name;
-       }
-       cmux_clk->reg = of_iomap(np, 0);
-       if (!cmux_clk->reg) {
-               pr_err("%s: could not map register\n", __func__);
-               goto err_clk;
-       }
-
-       node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen");
-       if (node && (offset >= 0x80))
-               cmux_clk->flags = CLKSEL_ADJUST;
-
-       rc = of_property_read_string_index(np, "clock-output-names",
-                       0, &clk_name);
-       if (rc) {
-               pr_err("%s: read clock names error\n", np->name);
-               goto err_clk;
-       }
-
-       init.name = clk_name;
-       init.ops = &cmux_ops;
-       init.parent_names = parent_names;
-       init.num_parents = count;
-       init.flags = 0;
-       cmux_clk->hw.init = &init;
-
-       clk = clk_register(NULL, &cmux_clk->hw);
-       if (IS_ERR(clk)) {
-               pr_err("%s: could not register clock\n", clk_name);
-               goto err_clk;
-       }
-
-       rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
-       if (rc) {
-               pr_err("Could not register clock provider for node:%s\n",
-                        np->name);
-               goto err_clk;
-       }
-       goto err_name;
-
-err_clk:
-       kfree(cmux_clk);
-err_name:
-       /* free *_names because they are reallocated when registered */
-       kfree(parent_names);
-}
-
-static void __init core_pll_init(struct device_node *np)
-{
-       u32 mult;
-       int i, rc, count;
-       const char *clk_name, *parent_name;
-       struct clk_onecell_data *onecell_data;
-       struct clk      **subclks;
-       void __iomem *base;
-
-       base = of_iomap(np, 0);
-       if (!base) {
-               pr_err("clk-ppc: iomap error\n");
-               return;
-       }
-
-       /* get the multiple of PLL */
-       mult = ioread32be(base);
-
-       /* check if this PLL is disabled */
-       if (mult & PLL_KILL) {
-               pr_debug("PLL:%s is disabled\n", np->name);
-               goto err_map;
-       }
-       mult = (mult >> 1) & 0x3f;
-
-       parent_name = of_clk_get_parent_name(np, 0);
-       if (!parent_name) {
-               pr_err("PLL: %s must have a parent\n", np->name);
-               goto err_map;
-       }
-
-       count = of_property_count_strings(np, "clock-output-names");
-       if (count < 0 || count > 4) {
-               pr_err("%s: clock is not supported\n", np->name);
-               goto err_map;
-       }
-
-       /* output clock number per PLL */
-       clocks_per_pll = count;
-
-       subclks = kzalloc(sizeof(struct clk *) * count, GFP_KERNEL);
-       if (!subclks) {
-               pr_err("%s: could not allocate subclks\n", __func__);
-               goto err_map;
-       }
-
-       onecell_data = kzalloc(sizeof(struct clk_onecell_data), GFP_KERNEL);
-       if (!onecell_data) {
-               pr_err("%s: could not allocate onecell_data\n", __func__);
-               goto err_clks;
-       }
-
-       for (i = 0; i < count; i++) {
-               rc = of_property_read_string_index(np, "clock-output-names",
-                               i, &clk_name);
-               if (rc) {
-                       pr_err("%s: could not get clock names\n", np->name);
-                       goto err_cell;
-               }
-
-               /*
-                * when count == 4, there are 4 output clocks:
-                * /1, /2, /3, /4 respectively
-                * when count < 4, there are at least 2 output clocks:
-                * /1, /2, (/4, if count == 3) respectively.
-                */
-               if (count == 4)
-                       subclks[i] = clk_register_fixed_factor(NULL, clk_name,
-                                       parent_name, 0, mult, 1 + i);
-               else
-
-                       subclks[i] = clk_register_fixed_factor(NULL, clk_name,
-                                       parent_name, 0, mult, 1 << i);
-
-               if (IS_ERR(subclks[i])) {
-                       pr_err("%s: could not register clock\n", clk_name);
-                       goto err_cell;
-               }
-       }
-
-       onecell_data->clks = subclks;
-       onecell_data->clk_num = count;
-
-       rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
-       if (rc) {
-               pr_err("Could not register clk provider for node:%s\n",
-                        np->name);
-               goto err_cell;
-       }
-
-       iounmap(base);
-       return;
-err_cell:
-       kfree(onecell_data);
-err_clks:
-       kfree(subclks);
-err_map:
-       iounmap(base);
-}
-
-static void __init sysclk_init(struct device_node *node)
-{
-       struct clk *clk;
-       const char *clk_name = node->name;
-       struct device_node *np = of_get_parent(node);
-       u32 rate;
-
-       if (!np) {
-               pr_err("ppc-clk: could not get parent node\n");
-               return;
-       }
-
-       if (of_property_read_u32(np, "clock-frequency", &rate)) {
-               of_node_put(node);
-               return;
-       }
-
-       of_property_read_string(np, "clock-output-names", &clk_name);
-
-       clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
-       if (!IS_ERR(clk))
-               of_clk_add_provider(np, of_clk_src_simple_get, clk);
-}
-
-static const struct of_device_id clk_match[] __initconst = {
-       { .compatible = "fsl,qoriq-sysclk-1.0", .data = sysclk_init, },
-       { .compatible = "fsl,qoriq-sysclk-2.0", .data = sysclk_init, },
-       { .compatible = "fsl,qoriq-core-pll-1.0", .data = core_pll_init, },
-       { .compatible = "fsl,qoriq-core-pll-2.0", .data = core_pll_init, },
-       { .compatible = "fsl,qoriq-core-mux-1.0", .data = core_mux_init, },
-       { .compatible = "fsl,qoriq-core-mux-2.0", .data = core_mux_init, },
-       {}
-};
-
-static int __init ppc_corenet_clk_probe(struct platform_device *pdev)
-{
-       of_clk_init(clk_match);
-
-       return 0;
-}
-
-static const struct of_device_id ppc_clk_ids[] __initconst = {
-       { .compatible = "fsl,qoriq-clockgen-1.0", },
-       { .compatible = "fsl,qoriq-clockgen-2.0", },
-       {}
-};
-
-static struct platform_driver ppc_corenet_clk_driver = {
-       .driver = {
-               .name = "ppc_corenet_clock",
-               .of_match_table = ppc_clk_ids,
-       },
-       .probe = ppc_corenet_clk_probe,
-};
-
-static int __init ppc_corenet_clk_init(void)
-{
-       return platform_driver_register(&ppc_corenet_clk_driver);
-}
-subsys_initcall(ppc_corenet_clk_init);
diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c
new file mode 100644 (file)
index 0000000..cda90a9
--- /dev/null
@@ -0,0 +1,362 @@
+/*
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * clock driver for Freescale QorIQ SoCs.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk-provider.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+struct cmux_clk {
+       struct clk_hw hw;
+       void __iomem *reg;
+       unsigned int clk_per_pll;
+       u32 flags;
+};
+
+#define PLL_KILL                       BIT(31)
+#define        CLKSEL_SHIFT            27
+#define CLKSEL_ADJUST          BIT(0)
+#define to_cmux_clk(p)         container_of(p, struct cmux_clk, hw)
+
+static int cmux_set_parent(struct clk_hw *hw, u8 idx)
+{
+       struct cmux_clk *clk = to_cmux_clk(hw);
+       u32 clksel;
+
+       clksel = ((idx / clk->clk_per_pll) << 2) + idx % clk->clk_per_pll;
+       if (clk->flags & CLKSEL_ADJUST)
+               clksel += 8;
+       clksel = (clksel & 0xf) << CLKSEL_SHIFT;
+       iowrite32be(clksel, clk->reg);
+
+       return 0;
+}
+
+static u8 cmux_get_parent(struct clk_hw *hw)
+{
+       struct cmux_clk *clk = to_cmux_clk(hw);
+       u32 clksel;
+
+       clksel = ioread32be(clk->reg);
+       clksel = (clksel >> CLKSEL_SHIFT) & 0xf;
+       if (clk->flags & CLKSEL_ADJUST)
+               clksel -= 8;
+       clksel = (clksel >> 2) * clk->clk_per_pll + clksel % 4;
+
+       return clksel;
+}
+
+static const struct clk_ops cmux_ops = {
+       .get_parent = cmux_get_parent,
+       .set_parent = cmux_set_parent,
+};
+
+static void __init core_mux_init(struct device_node *np)
+{
+       struct clk *clk;
+       struct clk_init_data init;
+       struct cmux_clk *cmux_clk;
+       struct device_node *node;
+       int rc, count, i;
+       u32     offset;
+       const char *clk_name;
+       const char **parent_names;
+       struct of_phandle_args clkspec;
+
+       rc = of_property_read_u32(np, "reg", &offset);
+       if (rc) {
+               pr_err("%s: could not get reg property\n", np->name);
+               return;
+       }
+
+       /* get the input clock source count */
+       count = of_property_count_strings(np, "clock-names");
+       if (count < 0) {
+               pr_err("%s: get clock count error\n", np->name);
+               return;
+       }
+       parent_names = kcalloc(count, sizeof(char *), GFP_KERNEL);
+       if (!parent_names)
+               return;
+
+       for (i = 0; i < count; i++)
+               parent_names[i] = of_clk_get_parent_name(np, i);
+
+       cmux_clk = kzalloc(sizeof(*cmux_clk), GFP_KERNEL);
+       if (!cmux_clk)
+               goto err_name;
+
+       cmux_clk->reg = of_iomap(np, 0);
+       if (!cmux_clk->reg) {
+               pr_err("%s: could not map register\n", __func__);
+               goto err_clk;
+       }
+
+       rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", 0,
+                                       &clkspec);
+       if (rc) {
+               pr_err("%s: parse clock node error\n", __func__);
+               goto err_clk;
+       }
+
+       cmux_clk->clk_per_pll = of_property_count_strings(clkspec.np,
+                       "clock-output-names");
+       of_node_put(clkspec.np);
+
+       node = of_find_compatible_node(NULL, NULL, "fsl,p4080-clockgen");
+       if (node && (offset >= 0x80))
+               cmux_clk->flags = CLKSEL_ADJUST;
+
+       rc = of_property_read_string_index(np, "clock-output-names",
+                                          0, &clk_name);
+       if (rc) {
+               pr_err("%s: read clock names error\n", np->name);
+               goto err_clk;
+       }
+
+       init.name = clk_name;
+       init.ops = &cmux_ops;
+       init.parent_names = parent_names;
+       init.num_parents = count;
+       init.flags = 0;
+       cmux_clk->hw.init = &init;
+
+       clk = clk_register(NULL, &cmux_clk->hw);
+       if (IS_ERR(clk)) {
+               pr_err("%s: could not register clock\n", clk_name);
+               goto err_clk;
+       }
+
+       rc = of_clk_add_provider(np, of_clk_src_simple_get, clk);
+       if (rc) {
+               pr_err("Could not register clock provider for node:%s\n",
+                      np->name);
+               goto err_clk;
+       }
+       goto err_name;
+
+err_clk:
+       kfree(cmux_clk);
+err_name:
+       /* free *_names because they are reallocated when registered */
+       kfree(parent_names);
+}
+
+static void __init core_pll_init(struct device_node *np)
+{
+       u32 mult;
+       int i, rc, count;
+       const char *clk_name, *parent_name;
+       struct clk_onecell_data *onecell_data;
+       struct clk      **subclks;
+       void __iomem *base;
+
+       base = of_iomap(np, 0);
+       if (!base) {
+               pr_err("iomap error\n");
+               return;
+       }
+
+       /* get the multiple of PLL */
+       mult = ioread32be(base);
+
+       /* check if this PLL is disabled */
+       if (mult & PLL_KILL) {
+               pr_debug("PLL:%s is disabled\n", np->name);
+               goto err_map;
+       }
+       mult = (mult >> 1) & 0x3f;
+
+       parent_name = of_clk_get_parent_name(np, 0);
+       if (!parent_name) {
+               pr_err("PLL: %s must have a parent\n", np->name);
+               goto err_map;
+       }
+
+       count = of_property_count_strings(np, "clock-output-names");
+       if (count < 0 || count > 4) {
+               pr_err("%s: clock is not supported\n", np->name);
+               goto err_map;
+       }
+
+       subclks = kcalloc(count, sizeof(struct clk *), GFP_KERNEL);
+       if (!subclks)
+               goto err_map;
+
+       onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL);
+       if (!onecell_data)
+               goto err_clks;
+
+       for (i = 0; i < count; i++) {
+               rc = of_property_read_string_index(np, "clock-output-names",
+                                                  i, &clk_name);
+               if (rc) {
+                       pr_err("%s: could not get clock names\n", np->name);
+                       goto err_cell;
+               }
+
+               /*
+                * when count == 4, there are 4 output clocks:
+                * /1, /2, /3, /4 respectively
+                * when count < 4, there are at least 2 output clocks:
+                * /1, /2, (/4, if count == 3) respectively.
+                */
+               if (count == 4)
+                       subclks[i] = clk_register_fixed_factor(NULL, clk_name,
+                                       parent_name, 0, mult, 1 + i);
+               else
+
+                       subclks[i] = clk_register_fixed_factor(NULL, clk_name,
+                                       parent_name, 0, mult, 1 << i);
+
+               if (IS_ERR(subclks[i])) {
+                       pr_err("%s: could not register clock\n", clk_name);
+                       goto err_cell;
+               }
+       }
+
+       onecell_data->clks = subclks;
+       onecell_data->clk_num = count;
+
+       rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data);
+       if (rc) {
+               pr_err("Could not register clk provider for node:%s\n",
+                      np->name);
+               goto err_cell;
+       }
+
+       iounmap(base);
+       return;
+err_cell:
+       kfree(onecell_data);
+err_clks:
+       kfree(subclks);
+err_map:
+       iounmap(base);
+}
+
+static void __init sysclk_init(struct device_node *node)
+{
+       struct clk *clk;
+       const char *clk_name = node->name;
+       struct device_node *np = of_get_parent(node);
+       u32 rate;
+
+       if (!np) {
+               pr_err("could not get parent node\n");
+               return;
+       }
+
+       if (of_property_read_u32(np, "clock-frequency", &rate)) {
+               of_node_put(node);
+               return;
+       }
+
+       of_property_read_string(np, "clock-output-names", &clk_name);
+
+       clk = clk_register_fixed_rate(NULL, clk_name, NULL, CLK_IS_ROOT, rate);
+       if (!IS_ERR(clk))
+               of_clk_add_provider(np, of_clk_src_simple_get, clk);
+}
+
+static void __init pltfrm_pll_init(struct device_node *np)
+{
+       void __iomem *base;
+       uint32_t mult;
+       const char *parent_name, *clk_name;
+       int i, _errno;
+       struct clk_onecell_data *cod;
+
+       base = of_iomap(np, 0);
+       if (!base) {
+               pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name);
+               return;
+       }
+
+       /* Get the multiple of PLL */
+       mult = ioread32be(base);
+
+       iounmap(base);
+
+       /* Check if this PLL is disabled */
+       if (mult & PLL_KILL) {
+               pr_debug("%s(): %s: Disabled\n", __func__, np->name);
+               return;
+       }
+       mult = (mult & GENMASK(6, 1)) >> 1;
+
+       parent_name = of_clk_get_parent_name(np, 0);
+       if (!parent_name) {
+               pr_err("%s(): %s: of_clk_get_parent_name() failed\n",
+                      __func__, np->name);
+               return;
+       }
+
+       i = of_property_count_strings(np, "clock-output-names");
+       if (i < 0) {
+               pr_err("%s(): %s: of_property_count_strings(clock-output-names) = %d\n",
+                      __func__, np->name, i);
+               return;
+       }
+
+       cod = kmalloc(sizeof(*cod) + i * sizeof(struct clk *), GFP_KERNEL);
+       if (!cod)
+               return;
+       cod->clks = (struct clk **)(cod + 1);
+       cod->clk_num = i;
+
+       for (i = 0; i < cod->clk_num; i++) {
+               _errno = of_property_read_string_index(np, "clock-output-names",
+                                                      i, &clk_name);
+               if (_errno < 0) {
+                       pr_err("%s(): %s: of_property_read_string_index(clock-output-names) = %d\n",
+                              __func__, np->name, _errno);
+                       goto return_clk_unregister;
+               }
+
+               cod->clks[i] = clk_register_fixed_factor(NULL, clk_name,
+                                              parent_name, 0, mult, 1 + i);
+               if (IS_ERR(cod->clks[i])) {
+                       pr_err("%s(): %s: clk_register_fixed_factor(%s) = %ld\n",
+                              __func__, np->name,
+                              clk_name, PTR_ERR(cod->clks[i]));
+                       goto return_clk_unregister;
+               }
+       }
+
+       _errno = of_clk_add_provider(np, of_clk_src_onecell_get, cod);
+       if (_errno < 0) {
+               pr_err("%s(): %s: of_clk_add_provider() = %d\n",
+                      __func__, np->name, _errno);
+               goto return_clk_unregister;
+       }
+
+       return;
+
+return_clk_unregister:
+       while (--i >= 0)
+               clk_unregister(cod->clks[i]);
+       kfree(cod);
+}
+
+CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init);
+CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init);
+CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init);
+CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init);
+CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init);
+CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init);
+CLK_OF_DECLARE(qoriq_pltfrm_pll_1, "fsl,qoriq-platform-pll-1.0", pltfrm_pll_init);
+CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init);
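
The corenet driver returns here as clk-qoriq.c: the platform driver and its explicit of_clk_init() bootstrap are dropped, and each provider node is instead initialised through a CLK_OF_DECLARE() entry when the core scans the clock table, with platform-PLL support added on top. Consumers keep using the standard clk API; a small, hypothetical sketch of reading the rate of the first clock referenced by some consumer node:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/printk.h>

/* Sketch: "np" is any consumer node whose "clocks" property points at
 * one of the qoriq provider outputs; index 0 picks the first entry. */
static int foo_report_clk_rate(struct device_node *np)
{
	struct clk *clk;
	unsigned long rate;

	clk = of_clk_get(np, 0);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	rate = clk_get_rate(clk);
	pr_info("%s: clock runs at %lu Hz\n", np->name, rate);

	clk_put(clk);
	return 0;
}
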
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 642cf37124d3780024d1739bf9298aa788c336e2..eb0152961d3c60652af108246e9d10be9d13a371 100644 (file)
@@ -9,7 +9,7 @@
  * Standard functionality for the common clock API.  See Documentation/clk.txt
  */
 
-#include <linux/clk-private.h>
+#include <linux/clk-provider.h>
 #include <linux/clk/clk-conf.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
@@ -37,6 +37,55 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+static long clk_core_get_accuracy(struct clk_core *clk);
+static unsigned long clk_core_get_rate(struct clk_core *clk);
+static int clk_core_get_phase(struct clk_core *clk);
+static bool clk_core_is_prepared(struct clk_core *clk);
+static bool clk_core_is_enabled(struct clk_core *clk);
+static struct clk_core *clk_core_lookup(const char *name);
+
+/***    private data structures    ***/
+
+struct clk_core {
+       const char              *name;
+       const struct clk_ops    *ops;
+       struct clk_hw           *hw;
+       struct module           *owner;
+       struct clk_core         *parent;
+       const char              **parent_names;
+       struct clk_core         **parents;
+       u8                      num_parents;
+       u8                      new_parent_index;
+       unsigned long           rate;
+       unsigned long           req_rate;
+       unsigned long           new_rate;
+       struct clk_core         *new_parent;
+       struct clk_core         *new_child;
+       unsigned long           flags;
+       unsigned int            enable_count;
+       unsigned int            prepare_count;
+       unsigned long           accuracy;
+       int                     phase;
+       struct hlist_head       children;
+       struct hlist_node       child_node;
+       struct hlist_node       debug_node;
+       struct hlist_head       clks;
+       unsigned int            notifier_count;
+#ifdef CONFIG_DEBUG_FS
+       struct dentry           *dentry;
+#endif
+       struct kref             ref;
+};
+
+struct clk {
+       struct clk_core *core;
+       const char *dev_id;
+       const char *con_id;
+       unsigned long min_rate;
+       unsigned long max_rate;
+       struct hlist_node child_node;
+};
+
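
The rework above splits the old struct clk into a shared struct clk_core (one per hardware clock) and a thin per-user struct clk handle; per-user constraints such as min_rate/max_rate are later intersected across core->clks by clk_core_get_boundaries(). A stand-alone, user-space model of that aggregation, with names simplified and nothing taken from the kernel structures themselves, might look like:

#include <stdio.h>

struct clk_core_model {			/* shared state, one per hardware clock */
	const char *name;
	unsigned long rate;
};

struct clk_model {			/* one per consumer handle */
	struct clk_core_model *core;
	unsigned long min_rate;
	unsigned long max_rate;
};

/* Mirrors the idea of clk_core_get_boundaries(): intersect all users' ranges. */
static void get_boundaries(const struct clk_model *users, int n,
			   unsigned long *min, unsigned long *max)
{
	int i;

	*min = 0;
	*max = ~0UL;
	for (i = 0; i < n; i++) {
		if (users[i].min_rate > *min)
			*min = users[i].min_rate;
		if (users[i].max_rate < *max)
			*max = users[i].max_rate;
	}
}

int main(void)
{
	struct clk_core_model core = { "foo_pll", 100000000UL };
	struct clk_model users[2] = {
		{ &core, 50000000UL, 200000000UL },
		{ &core, 75000000UL, 150000000UL },
	};
	unsigned long min, max;

	get_boundaries(users, 2, &min, &max);
	printf("allowed range: %lu..%lu\n", min, max);	/* 75000000..150000000 */
	return 0;
}
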
 /***           locking             ***/
 static void clk_prepare_lock(void)
 {
@@ -114,7 +163,8 @@ static struct hlist_head *orphan_list[] = {
        NULL,
 };
 
-static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
+static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
+                                int level)
 {
        if (!c)
                return;
@@ -122,14 +172,14 @@ static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
        seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
                   level * 3 + 1, "",
                   30 - level * 3, c->name,
-                  c->enable_count, c->prepare_count, clk_get_rate(c),
-                  clk_get_accuracy(c), clk_get_phase(c));
+                  c->enable_count, c->prepare_count, clk_core_get_rate(c),
+                  clk_core_get_accuracy(c), clk_core_get_phase(c));
 }
 
-static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
+static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
                                     int level)
 {
-       struct clk *child;
+       struct clk_core *child;
 
        if (!c)
                return;
@@ -142,7 +192,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
 
 static int clk_summary_show(struct seq_file *s, void *data)
 {
-       struct clk *c;
+       struct clk_core *c;
        struct hlist_head **lists = (struct hlist_head **)s->private;
 
        seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
@@ -172,7 +222,7 @@ static const struct file_operations clk_summary_fops = {
        .release        = single_release,
 };
 
-static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
+static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
 {
        if (!c)
                return;
@@ -180,14 +230,14 @@ static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
        seq_printf(s, "\"%s\": { ", c->name);
        seq_printf(s, "\"enable_count\": %d,", c->enable_count);
        seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
-       seq_printf(s, "\"rate\": %lu", clk_get_rate(c));
-       seq_printf(s, "\"accuracy\": %lu", clk_get_accuracy(c));
-       seq_printf(s, "\"phase\": %d", clk_get_phase(c));
+       seq_printf(s, "\"rate\": %lu", clk_core_get_rate(c));
+       seq_printf(s, "\"accuracy\": %lu", clk_core_get_accuracy(c));
+       seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
 }
 
-static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
+static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
 {
-       struct clk *child;
+       struct clk_core *child;
 
        if (!c)
                return;
@@ -204,7 +254,7 @@ static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
 
 static int clk_dump(struct seq_file *s, void *data)
 {
-       struct clk *c;
+       struct clk_core *c;
        bool first_node = true;
        struct hlist_head **lists = (struct hlist_head **)s->private;
 
@@ -240,7 +290,7 @@ static const struct file_operations clk_dump_fops = {
        .release        = single_release,
 };
 
-static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
+static int clk_debug_create_one(struct clk_core *clk, struct dentry *pdentry)
 {
        struct dentry *d;
        int ret = -ENOMEM;
@@ -315,7 +365,7 @@ out:
  * initialized.  Otherwise it bails out early since the debugfs clk tree
  * will be created lazily by clk_debug_init as part of a late_initcall.
  */
-static int clk_debug_register(struct clk *clk)
+static int clk_debug_register(struct clk_core *clk)
 {
        int ret = 0;
 
@@ -340,16 +390,12 @@ unlock:
  * debugfs clk tree if clk->dentry points to debugfs created by
  * clk_debug_register in __clk_init.
  */
-static void clk_debug_unregister(struct clk *clk)
+static void clk_debug_unregister(struct clk_core *clk)
 {
        mutex_lock(&clk_debug_lock);
-       if (!clk->dentry)
-               goto out;
-
        hlist_del_init(&clk->debug_node);
        debugfs_remove_recursive(clk->dentry);
        clk->dentry = NULL;
-out:
        mutex_unlock(&clk_debug_lock);
 }
 
@@ -358,8 +404,9 @@ struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
 {
        struct dentry *d = NULL;
 
-       if (hw->clk->dentry)
-               d = debugfs_create_file(name, mode, hw->clk->dentry, data, fops);
+       if (hw->core->dentry)
+               d = debugfs_create_file(name, mode, hw->core->dentry, data,
+                                       fops);
 
        return d;
 }
@@ -379,7 +426,7 @@ EXPORT_SYMBOL_GPL(clk_debugfs_add_file);
  */
 static int __init clk_debug_init(void)
 {
-       struct clk *clk;
+       struct clk_core *clk;
        struct dentry *d;
 
        rootdir = debugfs_create_dir("clk", NULL);
@@ -418,22 +465,20 @@ static int __init clk_debug_init(void)
 }
 late_initcall(clk_debug_init);
 #else
-static inline int clk_debug_register(struct clk *clk) { return 0; }
-static inline void clk_debug_reparent(struct clk *clk, struct clk *new_parent)
+static inline int clk_debug_register(struct clk_core *clk) { return 0; }
+static inline void clk_debug_reparent(struct clk_core *clk,
+                                     struct clk_core *new_parent)
 {
 }
-static inline void clk_debug_unregister(struct clk *clk)
+static inline void clk_debug_unregister(struct clk_core *clk)
 {
 }
 #endif
 
 /* caller must hold prepare_lock */
-static void clk_unprepare_unused_subtree(struct clk *clk)
+static void clk_unprepare_unused_subtree(struct clk_core *clk)
 {
-       struct clk *child;
-
-       if (!clk)
-               return;
+       struct clk_core *child;
 
        hlist_for_each_entry(child, &clk->children, child_node)
                clk_unprepare_unused_subtree(child);
@@ -444,7 +489,7 @@ static void clk_unprepare_unused_subtree(struct clk *clk)
        if (clk->flags & CLK_IGNORE_UNUSED)
                return;
 
-       if (__clk_is_prepared(clk)) {
+       if (clk_core_is_prepared(clk)) {
                if (clk->ops->unprepare_unused)
                        clk->ops->unprepare_unused(clk->hw);
                else if (clk->ops->unprepare)
@@ -453,14 +498,11 @@ static void clk_unprepare_unused_subtree(struct clk *clk)
 }
 
 /* caller must hold prepare_lock */
-static void clk_disable_unused_subtree(struct clk *clk)
+static void clk_disable_unused_subtree(struct clk_core *clk)
 {
-       struct clk *child;
+       struct clk_core *child;
        unsigned long flags;
 
-       if (!clk)
-               goto out;
-
        hlist_for_each_entry(child, &clk->children, child_node)
                clk_disable_unused_subtree(child);
 
@@ -477,7 +519,7 @@ static void clk_disable_unused_subtree(struct clk *clk)
         * sequence.  call .disable_unused if available, otherwise fall
         * back to .disable
         */
-       if (__clk_is_enabled(clk)) {
+       if (clk_core_is_enabled(clk)) {
                if (clk->ops->disable_unused)
                        clk->ops->disable_unused(clk->hw);
                else if (clk->ops->disable)
@@ -486,9 +528,6 @@ static void clk_disable_unused_subtree(struct clk *clk)
 
 unlock_out:
        clk_enable_unlock(flags);
-
-out:
-       return;
 }
 
 static bool clk_ignore_unused;
@@ -501,7 +540,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
 
 static int clk_disable_unused(void)
 {
-       struct clk *clk;
+       struct clk_core *clk;
 
        if (clk_ignore_unused) {
                pr_warn("clk: Not disabling unused clocks\n");
@@ -532,48 +571,65 @@ late_initcall_sync(clk_disable_unused);
 
 const char *__clk_get_name(struct clk *clk)
 {
-       return !clk ? NULL : clk->name;
+       return !clk ? NULL : clk->core->name;
 }
 EXPORT_SYMBOL_GPL(__clk_get_name);
 
 struct clk_hw *__clk_get_hw(struct clk *clk)
 {
-       return !clk ? NULL : clk->hw;
+       return !clk ? NULL : clk->core->hw;
 }
 EXPORT_SYMBOL_GPL(__clk_get_hw);
 
 u8 __clk_get_num_parents(struct clk *clk)
 {
-       return !clk ? 0 : clk->num_parents;
+       return !clk ? 0 : clk->core->num_parents;
 }
 EXPORT_SYMBOL_GPL(__clk_get_num_parents);
 
 struct clk *__clk_get_parent(struct clk *clk)
 {
-       return !clk ? NULL : clk->parent;
+       if (!clk)
+               return NULL;
+
+       /* TODO: Create a per-user clk and change callers to call clk_put */
+       return !clk->core->parent ? NULL : clk->core->parent->hw->clk;
 }
 EXPORT_SYMBOL_GPL(__clk_get_parent);
 
-struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+static struct clk_core *clk_core_get_parent_by_index(struct clk_core *clk,
+                                                        u8 index)
 {
        if (!clk || index >= clk->num_parents)
                return NULL;
        else if (!clk->parents)
-               return __clk_lookup(clk->parent_names[index]);
+               return clk_core_lookup(clk->parent_names[index]);
        else if (!clk->parents[index])
                return clk->parents[index] =
-                       __clk_lookup(clk->parent_names[index]);
+                       clk_core_lookup(clk->parent_names[index]);
        else
                return clk->parents[index];
 }
+
+struct clk *clk_get_parent_by_index(struct clk *clk, u8 index)
+{
+       struct clk_core *parent;
+
+       if (!clk)
+               return NULL;
+
+       parent = clk_core_get_parent_by_index(clk->core, index);
+
+       return !parent ? NULL : parent->hw->clk;
+}
 EXPORT_SYMBOL_GPL(clk_get_parent_by_index);
 
 unsigned int __clk_get_enable_count(struct clk *clk)
 {
-       return !clk ? 0 : clk->enable_count;
+       return !clk ? 0 : clk->core->enable_count;
 }
 
-unsigned long __clk_get_rate(struct clk *clk)
+static unsigned long clk_core_get_rate_nolock(struct clk_core *clk)
 {
        unsigned long ret;
 
@@ -593,9 +649,17 @@ unsigned long __clk_get_rate(struct clk *clk)
 out:
        return ret;
 }
+
+unsigned long __clk_get_rate(struct clk *clk)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_get_rate_nolock(clk->core);
+}
 EXPORT_SYMBOL_GPL(__clk_get_rate);
 
-static unsigned long __clk_get_accuracy(struct clk *clk)
+static unsigned long __clk_get_accuracy(struct clk_core *clk)
 {
        if (!clk)
                return 0;
@@ -605,11 +669,11 @@ static unsigned long __clk_get_accuracy(struct clk *clk)
 
 unsigned long __clk_get_flags(struct clk *clk)
 {
-       return !clk ? 0 : clk->flags;
+       return !clk ? 0 : clk->core->flags;
 }
 EXPORT_SYMBOL_GPL(__clk_get_flags);
 
-bool __clk_is_prepared(struct clk *clk)
+static bool clk_core_is_prepared(struct clk_core *clk)
 {
        int ret;
 
@@ -630,7 +694,15 @@ out:
        return !!ret;
 }
 
-bool __clk_is_enabled(struct clk *clk)
+bool __clk_is_prepared(struct clk *clk)
+{
+       if (!clk)
+               return false;
+
+       return clk_core_is_prepared(clk->core);
+}
+
+static bool clk_core_is_enabled(struct clk_core *clk)
 {
        int ret;
 
@@ -650,12 +722,21 @@ bool __clk_is_enabled(struct clk *clk)
 out:
        return !!ret;
 }
+
+bool __clk_is_enabled(struct clk *clk)
+{
+       if (!clk)
+               return false;
+
+       return clk_core_is_enabled(clk->core);
+}
 EXPORT_SYMBOL_GPL(__clk_is_enabled);
 
-static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
+static struct clk_core *__clk_lookup_subtree(const char *name,
+                                            struct clk_core *clk)
 {
-       struct clk *child;
-       struct clk *ret;
+       struct clk_core *child;
+       struct clk_core *ret;
 
        if (!strcmp(clk->name, name))
                return clk;
@@ -669,10 +750,10 @@ static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
        return NULL;
 }
 
-struct clk *__clk_lookup(const char *name)
+static struct clk_core *clk_core_lookup(const char *name)
 {
-       struct clk *root_clk;
-       struct clk *ret;
+       struct clk_core *root_clk;
+       struct clk_core *ret;
 
        if (!name)
                return NULL;
@@ -694,42 +775,53 @@ struct clk *__clk_lookup(const char *name)
        return NULL;
 }
 
-/*
- * Helper for finding best parent to provide a given frequency. This can be used
- * directly as a determine_rate callback (e.g. for a mux), or from a more
- * complex clock that may combine a mux with other operations.
- */
-long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
-                             unsigned long *best_parent_rate,
-                             struct clk_hw **best_parent_p)
+static bool mux_is_better_rate(unsigned long rate, unsigned long now,
+                          unsigned long best, unsigned long flags)
 {
-       struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+       if (flags & CLK_MUX_ROUND_CLOSEST)
+               return abs(now - rate) < abs(best - rate);
+
+       return now <= rate && now > best;
+}
+
+static long
+clk_mux_determine_rate_flags(struct clk_hw *hw, unsigned long rate,
+                            unsigned long min_rate,
+                            unsigned long max_rate,
+                            unsigned long *best_parent_rate,
+                            struct clk_hw **best_parent_p,
+                            unsigned long flags)
+{
+       struct clk_core *core = hw->core, *parent, *best_parent = NULL;
        int i, num_parents;
        unsigned long parent_rate, best = 0;
 
        /* if NO_REPARENT flag set, pass through to current parent */
-       if (clk->flags & CLK_SET_RATE_NO_REPARENT) {
-               parent = clk->parent;
-               if (clk->flags & CLK_SET_RATE_PARENT)
-                       best = __clk_round_rate(parent, rate);
+       if (core->flags & CLK_SET_RATE_NO_REPARENT) {
+               parent = core->parent;
+               if (core->flags & CLK_SET_RATE_PARENT)
+                       best = __clk_determine_rate(parent ? parent->hw : NULL,
+                                                   rate, min_rate, max_rate);
                else if (parent)
-                       best = __clk_get_rate(parent);
+                       best = clk_core_get_rate_nolock(parent);
                else
-                       best = __clk_get_rate(clk);
+                       best = clk_core_get_rate_nolock(core);
                goto out;
        }
 
        /* find the parent that can provide the fastest rate <= rate */
-       num_parents = clk->num_parents;
+       num_parents = core->num_parents;
        for (i = 0; i < num_parents; i++) {
-               parent = clk_get_parent_by_index(clk, i);
+               parent = clk_core_get_parent_by_index(core, i);
                if (!parent)
                        continue;
-               if (clk->flags & CLK_SET_RATE_PARENT)
-                       parent_rate = __clk_round_rate(parent, rate);
+               if (core->flags & CLK_SET_RATE_PARENT)
+                       parent_rate = __clk_determine_rate(parent->hw, rate,
+                                                          min_rate,
+                                                          max_rate);
                else
-                       parent_rate = __clk_get_rate(parent);
-               if (parent_rate <= rate && parent_rate > best) {
+                       parent_rate = clk_core_get_rate_nolock(parent);
+               if (mux_is_better_rate(rate, parent_rate, best, flags)) {
                        best_parent = parent;
                        best = parent_rate;
                }
@@ -742,11 +834,63 @@ out:
 
        return best;
 }
+
+struct clk *__clk_lookup(const char *name)
+{
+       struct clk_core *core = clk_core_lookup(name);
+
+       return !core ? NULL : core->hw->clk;
+}
+
+static void clk_core_get_boundaries(struct clk_core *clk,
+                                   unsigned long *min_rate,
+                                   unsigned long *max_rate)
+{
+       struct clk *clk_user;
+
+       *min_rate = 0;
+       *max_rate = ULONG_MAX;
+
+       hlist_for_each_entry(clk_user, &clk->clks, child_node)
+               *min_rate = max(*min_rate, clk_user->min_rate);
+
+       hlist_for_each_entry(clk_user, &clk->clks, child_node)
+               *max_rate = min(*max_rate, clk_user->max_rate);
+}
+
+/*
+ * Helper for finding best parent to provide a given frequency. This can be used
+ * directly as a determine_rate callback (e.g. for a mux), or from a more
+ * complex clock that may combine a mux with other operations.
+ */
+long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
+                             unsigned long *best_parent_rate,
+                             struct clk_hw **best_parent_p)
+{
+       return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
+                                           best_parent_rate,
+                                           best_parent_p, 0);
+}
 EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
 
+long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
+                             unsigned long *best_parent_rate,
+                             struct clk_hw **best_parent_p)
+{
+       return clk_mux_determine_rate_flags(hw, rate, min_rate, max_rate,
+                                           best_parent_rate,
+                                           best_parent_p,
+                                           CLK_MUX_ROUND_CLOSEST);
+}
+EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
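
The determine_rate callback now receives the aggregated min_rate/max_rate boundaries, and the generic mux helper gains a CLK_MUX_ROUND_CLOSEST flavour. It can still be plugged straight into a provider's ops, as the comment above says; a hedged sketch of a mux-style provider doing so (all foo_* names and the stub bodies are illustrative):

#include <linux/clk-provider.h>

static u8 foo_mux_get_parent(struct clk_hw *hw)
{
	return 0;		/* stub: a real driver would read its register */
}

static int foo_mux_set_parent(struct clk_hw *hw, u8 index)
{
	return 0;		/* stub: a real driver would write its register */
}

/*
 * __clk_mux_determine_rate() matches the new prototype
 *   long (*determine_rate)(struct clk_hw *hw, unsigned long rate,
 *                          unsigned long min_rate, unsigned long max_rate,
 *                          unsigned long *best_parent_rate,
 *                          struct clk_hw **best_parent_hw);
 * so it is usable directly; __clk_mux_determine_rate_closest() rounds to
 * the nearest parent rate instead of the highest rate <= the request.
 */
static const struct clk_ops foo_mux_ops = {
	.get_parent	= foo_mux_get_parent,
	.set_parent	= foo_mux_set_parent,
	.determine_rate	= __clk_mux_determine_rate,
};
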
+
 /***        clk api        ***/
 
-void __clk_unprepare(struct clk *clk)
+static void clk_core_unprepare(struct clk_core *clk)
 {
        if (!clk)
                return;
@@ -762,7 +906,7 @@ void __clk_unprepare(struct clk *clk)
        if (clk->ops->unprepare)
                clk->ops->unprepare(clk->hw);
 
-       __clk_unprepare(clk->parent);
+       clk_core_unprepare(clk->parent);
 }
 
 /**
@@ -782,12 +926,12 @@ void clk_unprepare(struct clk *clk)
                return;
 
        clk_prepare_lock();
-       __clk_unprepare(clk);
+       clk_core_unprepare(clk->core);
        clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unprepare);
 
-int __clk_prepare(struct clk *clk)
+static int clk_core_prepare(struct clk_core *clk)
 {
        int ret = 0;
 
@@ -795,14 +939,14 @@ int __clk_prepare(struct clk *clk)
                return 0;
 
        if (clk->prepare_count == 0) {
-               ret = __clk_prepare(clk->parent);
+               ret = clk_core_prepare(clk->parent);
                if (ret)
                        return ret;
 
                if (clk->ops->prepare) {
                        ret = clk->ops->prepare(clk->hw);
                        if (ret) {
-                               __clk_unprepare(clk->parent);
+                               clk_core_unprepare(clk->parent);
                                return ret;
                        }
                }
@@ -829,15 +973,18 @@ int clk_prepare(struct clk *clk)
 {
        int ret;
 
+       if (!clk)
+               return 0;
+
        clk_prepare_lock();
-       ret = __clk_prepare(clk);
+       ret = clk_core_prepare(clk->core);
        clk_prepare_unlock();
 
        return ret;
 }
 EXPORT_SYMBOL_GPL(clk_prepare);
 
-static void __clk_disable(struct clk *clk)
+static void clk_core_disable(struct clk_core *clk)
 {
        if (!clk)
                return;
@@ -851,7 +998,15 @@ static void __clk_disable(struct clk *clk)
        if (clk->ops->disable)
                clk->ops->disable(clk->hw);
 
-       __clk_disable(clk->parent);
+       clk_core_disable(clk->parent);
+}
+
+static void __clk_disable(struct clk *clk)
+{
+       if (!clk)
+               return;
+
+       clk_core_disable(clk->core);
 }
 
 /**
@@ -879,7 +1034,7 @@ void clk_disable(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_disable);
 
-static int __clk_enable(struct clk *clk)
+static int clk_core_enable(struct clk_core *clk)
 {
        int ret = 0;
 
@@ -890,7 +1045,7 @@ static int __clk_enable(struct clk *clk)
                return -ESHUTDOWN;
 
        if (clk->enable_count == 0) {
-               ret = __clk_enable(clk->parent);
+               ret = clk_core_enable(clk->parent);
 
                if (ret)
                        return ret;
@@ -898,7 +1053,7 @@ static int __clk_enable(struct clk *clk)
                if (clk->ops->enable) {
                        ret = clk->ops->enable(clk->hw);
                        if (ret) {
-                               __clk_disable(clk->parent);
+                               clk_core_disable(clk->parent);
                                return ret;
                        }
                }
@@ -908,6 +1063,14 @@ static int __clk_enable(struct clk *clk)
        return 0;
 }
 
+static int __clk_enable(struct clk *clk)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_enable(clk->core);
+}
+
 /**
  * clk_enable - ungate a clock
  * @clk: the clk being ungated
@@ -934,17 +1097,13 @@ int clk_enable(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_enable);
 
-/**
- * __clk_round_rate - round the given rate for a clk
- * @clk: round the rate of this clock
- * @rate: the rate which is to be rounded
- *
- * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
- */
-unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
+static unsigned long clk_core_round_rate_nolock(struct clk_core *clk,
+                                               unsigned long rate,
+                                               unsigned long min_rate,
+                                               unsigned long max_rate)
 {
        unsigned long parent_rate = 0;
-       struct clk *parent;
+       struct clk_core *parent;
        struct clk_hw *parent_hw;
 
        if (!clk)
@@ -956,15 +1115,59 @@ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
 
        if (clk->ops->determine_rate) {
                parent_hw = parent ? parent->hw : NULL;
-               return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
-                                               &parent_hw);
+               return clk->ops->determine_rate(clk->hw, rate,
+                                               min_rate, max_rate,
+                                               &parent_rate, &parent_hw);
        } else if (clk->ops->round_rate)
                return clk->ops->round_rate(clk->hw, rate, &parent_rate);
        else if (clk->flags & CLK_SET_RATE_PARENT)
-               return __clk_round_rate(clk->parent, rate);
+               return clk_core_round_rate_nolock(clk->parent, rate, min_rate,
+                                                 max_rate);
        else
                return clk->rate;
 }
+
+/**
+ * __clk_determine_rate - get the closest rate actually supported by a clock
+ * @hw: determine the rate of this clock
+ * @rate: target rate
+ * @min_rate: returned rate must be greater than this rate
+ * @max_rate: returned rate must be less than this rate
+ *
+ * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate and
+ * .determine_rate.
+ */
+unsigned long __clk_determine_rate(struct clk_hw *hw,
+                                  unsigned long rate,
+                                  unsigned long min_rate,
+                                  unsigned long max_rate)
+{
+       if (!hw)
+               return 0;
+
+       return clk_core_round_rate_nolock(hw->core, rate, min_rate, max_rate);
+}
+EXPORT_SYMBOL_GPL(__clk_determine_rate);
+
+/**
+ * __clk_round_rate - round the given rate for a clk
+ * @clk: round the rate of this clock
+ * @rate: the rate which is to be rounded
+ *
+ * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
+ */
+unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
+{
+       unsigned long min_rate;
+       unsigned long max_rate;
+
+       if (!clk)
+               return 0;
+
+       clk_core_get_boundaries(clk->core, &min_rate, &max_rate);
+
+       return clk_core_round_rate_nolock(clk->core, rate, min_rate, max_rate);
+}
 EXPORT_SYMBOL_GPL(__clk_round_rate);
 
 /**
@@ -980,6 +1183,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
 {
        unsigned long ret;
 
+       if (!clk)
+               return 0;
+
        clk_prepare_lock();
        ret = __clk_round_rate(clk, rate);
        clk_prepare_unlock();
@@ -1002,22 +1208,21 @@ EXPORT_SYMBOL_GPL(clk_round_rate);
  * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
  * a driver returns that.
  */
-static int __clk_notify(struct clk *clk, unsigned long msg,
+static int __clk_notify(struct clk_core *clk, unsigned long msg,
                unsigned long old_rate, unsigned long new_rate)
 {
        struct clk_notifier *cn;
        struct clk_notifier_data cnd;
        int ret = NOTIFY_DONE;
 
-       cnd.clk = clk;
        cnd.old_rate = old_rate;
        cnd.new_rate = new_rate;
 
        list_for_each_entry(cn, &clk_notifier_list, node) {
-               if (cn->clk == clk) {
+               if (cn->clk->core == clk) {
+                       cnd.clk = cn->clk;
                        ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
                                        &cnd);
-                       break;
                }
        }
 
@@ -1035,10 +1240,10 @@ static int __clk_notify(struct clk *clk, unsigned long msg,
  *
  * Caller must hold prepare_lock.
  */
-static void __clk_recalc_accuracies(struct clk *clk)
+static void __clk_recalc_accuracies(struct clk_core *clk)
 {
        unsigned long parent_accuracy = 0;
-       struct clk *child;
+       struct clk_core *child;
 
        if (clk->parent)
                parent_accuracy = clk->parent->accuracy;
@@ -1053,6 +1258,20 @@ static void __clk_recalc_accuracies(struct clk *clk)
                __clk_recalc_accuracies(child);
 }
 
+static long clk_core_get_accuracy(struct clk_core *clk)
+{
+       unsigned long accuracy;
+
+       clk_prepare_lock();
+       if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
+               __clk_recalc_accuracies(clk);
+
+       accuracy = __clk_get_accuracy(clk);
+       clk_prepare_unlock();
+
+       return accuracy;
+}
+
 /**
  * clk_get_accuracy - return the accuracy of clk
  * @clk: the clk whose accuracy is being returned
@@ -1064,20 +1283,15 @@ static void __clk_recalc_accuracies(struct clk *clk)
  */
 long clk_get_accuracy(struct clk *clk)
 {
-       unsigned long accuracy;
-
-       clk_prepare_lock();
-       if (clk && (clk->flags & CLK_GET_ACCURACY_NOCACHE))
-               __clk_recalc_accuracies(clk);
-
-       accuracy = __clk_get_accuracy(clk);
-       clk_prepare_unlock();
+       if (!clk)
+               return 0;
 
-       return accuracy;
+       return clk_core_get_accuracy(clk->core);
 }
 EXPORT_SYMBOL_GPL(clk_get_accuracy);
 
-static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate)
+static unsigned long clk_recalc(struct clk_core *clk,
+                               unsigned long parent_rate)
 {
        if (clk->ops->recalc_rate)
                return clk->ops->recalc_rate(clk->hw, parent_rate);
@@ -1098,11 +1312,11 @@ static unsigned long clk_recalc(struct clk *clk, unsigned long parent_rate)
  *
  * Caller must hold prepare_lock.
  */
-static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
+static void __clk_recalc_rates(struct clk_core *clk, unsigned long msg)
 {
        unsigned long old_rate;
        unsigned long parent_rate = 0;
-       struct clk *child;
+       struct clk_core *child;
 
        old_rate = clk->rate;
 
@@ -1122,15 +1336,7 @@ static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
                __clk_recalc_rates(child, msg);
 }
 
-/**
- * clk_get_rate - return the rate of clk
- * @clk: the clk whose rate is being returned
- *
- * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
- * is set, which means a recalc_rate will be issued.
- * If clk is NULL then returns 0.
- */
-unsigned long clk_get_rate(struct clk *clk)
+static unsigned long clk_core_get_rate(struct clk_core *clk)
 {
        unsigned long rate;
 
@@ -1139,14 +1345,32 @@ unsigned long clk_get_rate(struct clk *clk)
        if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
                __clk_recalc_rates(clk, 0);
 
-       rate = __clk_get_rate(clk);
+       rate = clk_core_get_rate_nolock(clk);
        clk_prepare_unlock();
 
        return rate;
 }
+EXPORT_SYMBOL_GPL(clk_core_get_rate);
+
+/**
+ * clk_get_rate - return the rate of clk
+ * @clk: the clk whose rate is being returned
+ *
+ * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
+ * is set, which means a recalc_rate will be issued.
+ * If clk is NULL then returns 0.
+ */
+unsigned long clk_get_rate(struct clk *clk)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_get_rate(clk->core);
+}
 EXPORT_SYMBOL_GPL(clk_get_rate);
 
-static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
+static int clk_fetch_parent_index(struct clk_core *clk,
+                                 struct clk_core *parent)
 {
        int i;
 
@@ -1160,7 +1384,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
        /*
         * find index of new parent clock using cached parent ptrs,
         * or if not yet cached, use string name comparison and cache
-        * them now to avoid future calls to __clk_lookup.
+        * them now to avoid future calls to clk_core_lookup.
         */
        for (i = 0; i < clk->num_parents; i++) {
                if (clk->parents[i] == parent)
@@ -1170,7 +1394,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
                        continue;
 
                if (!strcmp(clk->parent_names[i], parent->name)) {
-                       clk->parents[i] = __clk_lookup(parent->name);
+                       clk->parents[i] = clk_core_lookup(parent->name);
                        return i;
                }
        }
@@ -1178,7 +1402,7 @@ static int clk_fetch_parent_index(struct clk *clk, struct clk *parent)
        return -EINVAL;
 }
 
-static void clk_reparent(struct clk *clk, struct clk *new_parent)
+static void clk_reparent(struct clk_core *clk, struct clk_core *new_parent)
 {
        hlist_del(&clk->child_node);
 
@@ -1195,10 +1419,11 @@ static void clk_reparent(struct clk *clk, struct clk *new_parent)
        clk->parent = new_parent;
 }
 
-static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
+static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
+                                          struct clk_core *parent)
 {
        unsigned long flags;
-       struct clk *old_parent = clk->parent;
+       struct clk_core *old_parent = clk->parent;
 
        /*
         * Migrate prepare state between parents and prevent race with
@@ -1218,9 +1443,9 @@ static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
         * See also: Comment for clk_set_parent() below.
         */
        if (clk->prepare_count) {
-               __clk_prepare(parent);
-               clk_enable(parent);
-               clk_enable(clk);
+               clk_core_prepare(parent);
+               clk_core_enable(parent);
+               clk_core_enable(clk);
        }
 
        /* update the clk tree topology */
@@ -1231,25 +1456,27 @@ static struct clk *__clk_set_parent_before(struct clk *clk, struct clk *parent)
        return old_parent;
 }
 
-static void __clk_set_parent_after(struct clk *clk, struct clk *parent,
-               struct clk *old_parent)
+static void __clk_set_parent_after(struct clk_core *core,
+                                  struct clk_core *parent,
+                                  struct clk_core *old_parent)
 {
        /*
         * Finish the migration of prepare state and undo the changes done
         * for preventing a race with clk_enable().
         */
-       if (clk->prepare_count) {
-               clk_disable(clk);
-               clk_disable(old_parent);
-               __clk_unprepare(old_parent);
+       if (core->prepare_count) {
+               clk_core_disable(core);
+               clk_core_disable(old_parent);
+               clk_core_unprepare(old_parent);
        }
 }
 
-static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
+static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
+                           u8 p_index)
 {
        unsigned long flags;
        int ret = 0;
-       struct clk *old_parent;
+       struct clk_core *old_parent;
 
        old_parent = __clk_set_parent_before(clk, parent);
 
@@ -1263,9 +1490,9 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
                clk_enable_unlock(flags);
 
                if (clk->prepare_count) {
-                       clk_disable(clk);
-                       clk_disable(parent);
-                       __clk_unprepare(parent);
+                       clk_core_disable(clk);
+                       clk_core_disable(parent);
+                       clk_core_unprepare(parent);
                }
                return ret;
        }
@@ -1291,9 +1518,10 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent, u8 p_index)
  *
  * Caller must hold prepare_lock.
  */
-static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
+static int __clk_speculate_rates(struct clk_core *clk,
+                                unsigned long parent_rate)
 {
-       struct clk *child;
+       struct clk_core *child;
        unsigned long new_rate;
        int ret = NOTIFY_DONE;
 
@@ -1319,10 +1547,10 @@ out:
        return ret;
 }
 
-static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
-                            struct clk *new_parent, u8 p_index)
+static void clk_calc_subtree(struct clk_core *clk, unsigned long new_rate,
+                            struct clk_core *new_parent, u8 p_index)
 {
-       struct clk *child;
+       struct clk_core *child;
 
        clk->new_rate = new_rate;
        clk->new_parent = new_parent;
@@ -1342,13 +1570,16 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate,
  * calculate the new rates returning the topmost clock that has to be
  * changed.
  */
-static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
+static struct clk_core *clk_calc_new_rates(struct clk_core *clk,
+                                          unsigned long rate)
 {
-       struct clk *top = clk;
-       struct clk *old_parent, *parent;
+       struct clk_core *top = clk;
+       struct clk_core *old_parent, *parent;
        struct clk_hw *parent_hw;
        unsigned long best_parent_rate = 0;
        unsigned long new_rate;
+       unsigned long min_rate;
+       unsigned long max_rate;
        int p_index = 0;
 
        /* sanity */
@@ -1360,16 +1591,22 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
        if (parent)
                best_parent_rate = parent->rate;
 
+       clk_core_get_boundaries(clk, &min_rate, &max_rate);
+
        /* find the closest rate and parent clk/rate */
        if (clk->ops->determine_rate) {
                parent_hw = parent ? parent->hw : NULL;
                new_rate = clk->ops->determine_rate(clk->hw, rate,
+                                                   min_rate,
+                                                   max_rate,
                                                    &best_parent_rate,
                                                    &parent_hw);
-               parent = parent_hw ? parent_hw->clk : NULL;
+               parent = parent_hw ? parent_hw->core : NULL;
        } else if (clk->ops->round_rate) {
                new_rate = clk->ops->round_rate(clk->hw, rate,
                                                &best_parent_rate);
+               if (new_rate < min_rate || new_rate > max_rate)
+                       return NULL;
        } else if (!parent || !(clk->flags & CLK_SET_RATE_PARENT)) {
                /* pass-through clock without adjustable parent */
                clk->new_rate = clk->rate;
@@ -1390,7 +1627,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
        }
 
        /* try finding the new parent index */
-       if (parent) {
+       if (parent && clk->num_parents > 1) {
                p_index = clk_fetch_parent_index(clk, parent);
                if (p_index < 0) {
                        pr_debug("%s: clk %s can not be parent of clk %s\n",
@@ -1414,9 +1651,10 @@ out:
  * so that in case of an error we can walk down the whole tree again and
  * abort the change.
  */
-static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
+static struct clk_core *clk_propagate_rate_change(struct clk_core *clk,
+                                                 unsigned long event)
 {
-       struct clk *child, *tmp_clk, *fail_clk = NULL;
+       struct clk_core *child, *tmp_clk, *fail_clk = NULL;
        int ret = NOTIFY_DONE;
 
        if (clk->rate == clk->new_rate)
@@ -1451,14 +1689,14 @@ static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long even
  * walk down a subtree and set the new rates notifying the rate
  * change on the way
  */
-static void clk_change_rate(struct clk *clk)
+static void clk_change_rate(struct clk_core *clk)
 {
-       struct clk *child;
+       struct clk_core *child;
        struct hlist_node *tmp;
        unsigned long old_rate;
        unsigned long best_parent_rate = 0;
        bool skip_set_rate = false;
-       struct clk *old_parent;
+       struct clk_core *old_parent;
 
        old_rate = clk->rate;
 
@@ -1506,6 +1744,45 @@ static void clk_change_rate(struct clk *clk)
                clk_change_rate(clk->new_child);
 }
 
+static int clk_core_set_rate_nolock(struct clk_core *clk,
+                                   unsigned long req_rate)
+{
+       struct clk_core *top, *fail_clk;
+       unsigned long rate = req_rate;
+       int ret = 0;
+
+       if (!clk)
+               return 0;
+
+       /* bail early if nothing to do */
+       if (rate == clk_core_get_rate_nolock(clk))
+               return 0;
+
+       if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count)
+               return -EBUSY;
+
+       /* calculate new rates and get the topmost changed clock */
+       top = clk_calc_new_rates(clk, rate);
+       if (!top)
+               return -EINVAL;
+
+       /* notify that we are about to change rates */
+       fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
+       if (fail_clk) {
+               pr_debug("%s: failed to set %s rate\n", __func__,
+                               fail_clk->name);
+               clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
+               return -EBUSY;
+       }
+
+       /* change the rates */
+       clk_change_rate(top);
+
+       clk->req_rate = req_rate;
+
+       return ret;
+}
+
 /**
  * clk_set_rate - specify a new rate for clk
  * @clk: the clk whose rate is being changed
@@ -1529,8 +1806,7 @@ static void clk_change_rate(struct clk *clk)
  */
 int clk_set_rate(struct clk *clk, unsigned long rate)
 {
-       struct clk *top, *fail_clk;
-       int ret = 0;
+       int ret;
 
        if (!clk)
                return 0;
@@ -1538,41 +1814,81 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
        /* prevent racing with updates to the clock topology */
        clk_prepare_lock();
 
-       /* bail early if nothing to do */
-       if (rate == clk_get_rate(clk))
-               goto out;
+       ret = clk_core_set_rate_nolock(clk->core, rate);
 
-       if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
-               ret = -EBUSY;
-               goto out;
-       }
+       clk_prepare_unlock();
 
-       /* calculate new rates and get the topmost changed clock */
-       top = clk_calc_new_rates(clk, rate);
-       if (!top) {
-               ret = -EINVAL;
-               goto out;
-       }
+       return ret;
+}
+EXPORT_SYMBOL_GPL(clk_set_rate);
 
-       /* notify that we are about to change rates */
-       fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
-       if (fail_clk) {
-               pr_debug("%s: failed to set %s rate\n", __func__,
-                               fail_clk->name);
-               clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
-               ret = -EBUSY;
-               goto out;
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
+{
+       int ret = 0;
+
+       if (!clk)
+               return 0;
+
+       if (min > max) {
+               pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
+                      __func__, clk->core->name, clk->dev_id, clk->con_id,
+                      min, max);
+               return -EINVAL;
        }
 
-       /* change the rates */
-       clk_change_rate(top);
+       clk_prepare_lock();
+
+       if (min != clk->min_rate || max != clk->max_rate) {
+               clk->min_rate = min;
+               clk->max_rate = max;
+               ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+       }
 
-out:
        clk_prepare_unlock();
 
        return ret;
 }
-EXPORT_SYMBOL_GPL(clk_set_rate);
+EXPORT_SYMBOL_GPL(clk_set_rate_range);
+
+/**
+ * clk_set_min_rate - set a minimum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired minimum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_min_rate(struct clk *clk, unsigned long rate)
+{
+       if (!clk)
+               return 0;
+
+       return clk_set_rate_range(clk, rate, clk->max_rate);
+}
+EXPORT_SYMBOL_GPL(clk_set_min_rate);
+
+/**
+ * clk_set_max_rate - set a maximum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_max_rate(struct clk *clk, unsigned long rate)
+{
+       if (!clk)
+               return 0;
+
+       return clk_set_rate_range(clk, clk->min_rate, rate);
+}
+EXPORT_SYMBOL_GPL(clk_set_max_rate);
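The three helpers above let a consumer bound a clock without demanding one exact frequency; rate requests are then resolved against the aggregated per-user boundaries. A minimal consumer-side sketch, assuming a hypothetical device with a "bus" clock and purely illustrative rates (none of these names come from the patch itself):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical consumer: keep a bus clock inside a window, then pick a rate. */
static int example_constrain_and_set(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "bus");		/* "bus" is an assumed con_id */
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	/* Both bounds are inclusive and given in Hz. */
	ret = clk_set_rate_range(clk, 100 * 1000 * 1000, 200 * 1000 * 1000);
	if (ret)
		return ret;

	/* An exact request still goes through clk_set_rate(). */
	return clk_set_rate(clk, 150 * 1000 * 1000);
}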
 
 /**
  * clk_get_parent - return the parent of a clk
@@ -1599,11 +1915,11 @@ EXPORT_SYMBOL_GPL(clk_get_parent);
  *
  * For single-parent clocks without .get_parent, first check to see if the
  * .parents array exists, and if so use it to avoid an expensive tree
- * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
+ * traversal.  If .parents does not exist then walk the tree.
  */
-static struct clk *__clk_init_parent(struct clk *clk)
+static struct clk_core *__clk_init_parent(struct clk_core *clk)
 {
-       struct clk *ret = NULL;
+       struct clk_core *ret = NULL;
        u8 index;
 
        /* handle the trivial cases */
@@ -1613,7 +1929,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
 
        if (clk->num_parents == 1) {
                if (IS_ERR_OR_NULL(clk->parent))
-                       clk->parent = __clk_lookup(clk->parent_names[0]);
+                       clk->parent = clk_core_lookup(clk->parent_names[0]);
                ret = clk->parent;
                goto out;
        }
@@ -1627,8 +1943,8 @@ static struct clk *__clk_init_parent(struct clk *clk)
 
        /*
         * Do our best to cache parent clocks in clk->parents.  This prevents
-        * unnecessary and expensive calls to __clk_lookup.  We don't set
-        * clk->parent here; that is done by the calling function
+        * unnecessary and expensive lookups.  We don't set clk->parent here;
+        * that is done by the calling function.
         */
 
        index = clk->ops->get_parent(clk->hw);
@@ -1638,13 +1954,14 @@ static struct clk *__clk_init_parent(struct clk *clk)
                        kcalloc(clk->num_parents, sizeof(struct clk *),
                                        GFP_KERNEL);
 
-       ret = clk_get_parent_by_index(clk, index);
+       ret = clk_core_get_parent_by_index(clk, index);
 
 out:
        return ret;
 }
 
-void __clk_reparent(struct clk *clk, struct clk *new_parent)
+static void clk_core_reparent(struct clk_core *clk,
+                                 struct clk_core *new_parent)
 {
        clk_reparent(clk, new_parent);
        __clk_recalc_accuracies(clk);
@@ -1652,23 +1969,40 @@ void __clk_reparent(struct clk *clk, struct clk *new_parent)
 }
 
 /**
- * clk_set_parent - switch the parent of a mux clk
- * @clk: the mux clk whose input we are switching
- * @parent: the new input to clk
+ * clk_has_parent - check if a clock is a possible parent for another
+ * @clk: clock source
+ * @parent: parent clock source
  *
- * Re-parent clk to use parent as its new input source.  If clk is in
- * prepared state, the clk will get enabled for the duration of this call. If
- * that's not acceptable for a specific clk (Eg: the consumer can't handle
- * that, the reparenting is glitchy in hardware, etc), use the
- * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
- *
- * After successfully changing clk's parent clk_set_parent will update the
- * clk topology, sysfs topology and propagate rate recalculation via
- * __clk_recalc_rates.
+ * This function can be used in drivers that need to check that a clock can be
+ * the parent of another without actually changing the parent.
  *
- * Returns 0 on success, -EERROR otherwise.
+ * Returns true if @parent is a possible parent for @clk, false otherwise.
  */
-int clk_set_parent(struct clk *clk, struct clk *parent)
+bool clk_has_parent(struct clk *clk, struct clk *parent)
+{
+       struct clk_core *core, *parent_core;
+       unsigned int i;
+
+       /* NULL clocks should be nops, so return success if either is NULL. */
+       if (!clk || !parent)
+               return true;
+
+       core = clk->core;
+       parent_core = parent->core;
+
+       /* Optimize for the case where the parent is already the parent. */
+       if (core->parent == parent_core)
+               return true;
+
+       for (i = 0; i < core->num_parents; i++)
+               if (strcmp(core->parent_names[i], parent_core->name) == 0)
+                       return true;
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(clk_has_parent);
+
+static int clk_core_set_parent(struct clk_core *clk, struct clk_core *parent)
 {
        int ret = 0;
        int p_index = 0;
@@ -1728,6 +2062,31 @@ out:
 
        return ret;
 }
+
+/**
+ * clk_set_parent - switch the parent of a mux clk
+ * @clk: the mux clk whose input we are switching
+ * @parent: the new input to clk
+ *
+ * Re-parent clk to use parent as its new input source.  If clk is in
+ * prepared state, the clk will get enabled for the duration of this call. If
+ * that's not acceptable for a specific clk (Eg: the consumer can't handle
+ * that, the reparenting is glitchy in hardware, etc), use the
+ * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
+ *
+ * After successfully changing clk's parent clk_set_parent will update the
+ * clk topology, sysfs topology and propagate rate recalculation via
+ * __clk_recalc_rates.
+ *
+ * Returns 0 on success, -EERROR otherwise.
+ */
+int clk_set_parent(struct clk *clk, struct clk *parent)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_set_parent(clk->core, parent ? parent->core : NULL);
+}
 EXPORT_SYMBOL_GPL(clk_set_parent);
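clk_has_parent() gives consumers a side-effect-free check on a candidate parent before calling clk_set_parent(). A short hedged sketch; the mux and candidate handles are illustrative only:

#include <linux/clk.h>
#include <linux/errno.h>

/* Re-parent a mux only if the candidate is really one of its inputs. */
static int example_reparent(struct clk *mux, struct clk *candidate)
{
	if (!clk_has_parent(mux, candidate))
		return -EINVAL;

	return clk_set_parent(mux, candidate);
}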
 
 /**
@@ -1764,13 +2123,13 @@ int clk_set_phase(struct clk *clk, int degrees)
 
        clk_prepare_lock();
 
-       if (!clk->ops->set_phase)
+       if (!clk->core->ops->set_phase)
                goto out_unlock;
 
-       ret = clk->ops->set_phase(clk->hw, degrees);
+       ret = clk->core->ops->set_phase(clk->core->hw, degrees);
 
        if (!ret)
-               clk->phase = degrees;
+               clk->core->phase = degrees;
 
 out_unlock:
        clk_prepare_unlock();
@@ -1778,15 +2137,9 @@ out_unlock:
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(clk_set_phase);
 
-/**
- * clk_get_phase - return the phase shift of a clock signal
- * @clk: clock signal source
- *
- * Returns the phase shift of a clock node in degrees, otherwise returns
- * -EERROR.
- */
-int clk_get_phase(struct clk *clk)
+static int clk_core_get_phase(struct clk_core *clk)
 {
        int ret = 0;
 
@@ -1800,28 +2153,48 @@ int clk_get_phase(struct clk *clk)
 out:
        return ret;
 }
+EXPORT_SYMBOL_GPL(clk_get_phase);
+
+/**
+ * clk_get_phase - return the phase shift of a clock signal
+ * @clk: clock signal source
+ *
+ * Returns the phase shift of a clock node in degrees, otherwise returns
+ * -EERROR.
+ */
+int clk_get_phase(struct clk *clk)
+{
+       if (!clk)
+               return 0;
+
+       return clk_core_get_phase(clk->core);
+}
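Both phase calls now take the per-user handle and fall through to the underlying clk_core. A small hedged usage sketch; the 90-degree shift is purely illustrative:

#include <linux/clk.h>

/* Request a quarter-period shift and read back what was accepted. */
static int example_shift_phase(struct clk *clk)
{
	int ret;

	ret = clk_set_phase(clk, 90);
	if (ret)
		return ret;

	return clk_get_phase(clk);	/* degrees on success, negative errno otherwise */
}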
 
 /**
  * __clk_init - initialize the data structures in a struct clk
  * @dev:       device initializing this clk, placeholder for now
  * @clk:       clk being initialized
  *
- * Initializes the lists in struct clk, queries the hardware for the
+ * Initializes the lists in struct clk_core, queries the hardware for the
  * parent and rate and sets them both.
  */
-int __clk_init(struct device *dev, struct clk *clk)
+static int __clk_init(struct device *dev, struct clk *clk_user)
 {
        int i, ret = 0;
-       struct clk *orphan;
+       struct clk_core *orphan;
        struct hlist_node *tmp2;
+       struct clk_core *clk;
+       unsigned long rate;
 
-       if (!clk)
+       if (!clk_user)
                return -EINVAL;
 
+       clk = clk_user->core;
+
        clk_prepare_lock();
 
        /* check to see if a clock with this name is already registered */
-       if (__clk_lookup(clk->name)) {
+       if (clk_core_lookup(clk->name)) {
                pr_debug("%s: clk %s already initialized\n",
                                __func__, clk->name);
                ret = -EEXIST;
@@ -1873,7 +2246,7 @@ int __clk_init(struct device *dev, struct clk *clk)
                clk->parents = kcalloc(clk->num_parents, sizeof(struct clk *),
                                        GFP_KERNEL);
                /*
-                * __clk_lookup returns NULL for parents that have not been
+                * clk_core_lookup returns NULL for parents that have not been
                 * clk_init'd; thus any access to clk->parents[] must check
                 * for a NULL pointer.  We can always perform lazy lookups for
                 * missing parents later on.
@@ -1881,7 +2254,7 @@ int __clk_init(struct device *dev, struct clk *clk)
                if (clk->parents)
                        for (i = 0; i < clk->num_parents; i++)
                                clk->parents[i] =
-                                       __clk_lookup(clk->parent_names[i]);
+                                       clk_core_lookup(clk->parent_names[i]);
        }
 
        clk->parent = __clk_init_parent(clk);
@@ -1936,12 +2309,13 @@ int __clk_init(struct device *dev, struct clk *clk)
         * then rate is set to zero.
         */
        if (clk->ops->recalc_rate)
-               clk->rate = clk->ops->recalc_rate(clk->hw,
-                               __clk_get_rate(clk->parent));
+               rate = clk->ops->recalc_rate(clk->hw,
+                               clk_core_get_rate_nolock(clk->parent));
        else if (clk->parent)
-               clk->rate = clk->parent->rate;
+               rate = clk->parent->rate;
        else
-               clk->rate = 0;
+               rate = 0;
+       clk->rate = clk->req_rate = rate;
 
        /*
         * walk the list of orphan clocks and reparent any that are children of
@@ -1951,13 +2325,13 @@ int __clk_init(struct device *dev, struct clk *clk)
                if (orphan->num_parents && orphan->ops->get_parent) {
                        i = orphan->ops->get_parent(orphan->hw);
                        if (!strcmp(clk->name, orphan->parent_names[i]))
-                               __clk_reparent(orphan, clk);
+                               clk_core_reparent(orphan, clk);
                        continue;
                }
 
                for (i = 0; i < orphan->num_parents; i++)
                        if (!strcmp(clk->name, orphan->parent_names[i])) {
-                               __clk_reparent(orphan, clk);
+                               clk_core_reparent(orphan, clk);
                                break;
                        }
         }
@@ -1983,47 +2357,39 @@ out:
        return ret;
 }
 
-/**
- * __clk_register - register a clock and return a cookie.
- *
- * Same as clk_register, except that the .clk field inside hw shall point to a
- * preallocated (generally statically allocated) struct clk. None of the fields
- * of the struct clk need to be initialized.
- *
- * The data pointed to by .init and .clk field shall NOT be marked as init
- * data.
- *
- * __clk_register is only exposed via clk-private.h and is intended for use with
- * very large numbers of clocks that need to be statically initialized.  It is
- * a layering violation to include clk-private.h from any code which implements
- * a clock's .ops; as such any statically initialized clock data MUST be in a
- * separate C file from the logic that implements its operations.  Returns 0
- * on success, otherwise an error code.
- */
-struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
+struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
+                            const char *con_id)
 {
-       int ret;
        struct clk *clk;
 
-       clk = hw->clk;
-       clk->name = hw->init->name;
-       clk->ops = hw->init->ops;
-       clk->hw = hw;
-       clk->flags = hw->init->flags;
-       clk->parent_names = hw->init->parent_names;
-       clk->num_parents = hw->init->num_parents;
-       if (dev && dev->driver)
-               clk->owner = dev->driver->owner;
-       else
-               clk->owner = NULL;
+       /* This is to allow this function to be chained to others */
+       if (!hw || IS_ERR(hw))
+               return (struct clk *) hw;
 
-       ret = __clk_init(dev, clk);
-       if (ret)
-               return ERR_PTR(ret);
+       clk = kzalloc(sizeof(*clk), GFP_KERNEL);
+       if (!clk)
+               return ERR_PTR(-ENOMEM);
+
+       clk->core = hw->core;
+       clk->dev_id = dev_id;
+       clk->con_id = con_id;
+       clk->max_rate = ULONG_MAX;
+
+       clk_prepare_lock();
+       hlist_add_head(&clk->child_node, &hw->core->clks);
+       clk_prepare_unlock();
 
        return clk;
 }
-EXPORT_SYMBOL_GPL(__clk_register);
+
+void __clk_free_clk(struct clk *clk)
+{
+       clk_prepare_lock();
+       hlist_del(&clk->child_node);
+       clk_prepare_unlock();
+
+       kfree(clk);
+}
 
 /**
  * clk_register - allocate a new clock, register it and return an opaque cookie
@@ -2039,7 +2405,7 @@ EXPORT_SYMBOL_GPL(__clk_register);
 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
 {
        int i, ret;
-       struct clk *clk;
+       struct clk_core *clk;
 
        clk = kzalloc(sizeof(*clk), GFP_KERNEL);
        if (!clk) {
@@ -2060,7 +2426,7 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
        clk->hw = hw;
        clk->flags = hw->init->flags;
        clk->num_parents = hw->init->num_parents;
-       hw->clk = clk;
+       hw->core = clk;
 
        /* allocate local copy in case parent_names is __initdata */
        clk->parent_names = kcalloc(clk->num_parents, sizeof(char *),
@@ -2084,9 +2450,21 @@ struct clk *clk_register(struct device *dev, struct clk_hw *hw)
                }
        }
 
-       ret = __clk_init(dev, clk);
+       INIT_HLIST_HEAD(&clk->clks);
+
+       hw->clk = __clk_create_clk(hw, NULL, NULL);
+       if (IS_ERR(hw->clk)) {
+               pr_err("%s: could not allocate per-user clk\n", __func__);
+               ret = PTR_ERR(hw->clk);
+               goto fail_parent_names_copy;
+       }
+
+       ret = __clk_init(dev, hw->clk);
        if (!ret)
-               return clk;
+               return hw->clk;
+
+       __clk_free_clk(hw->clk);
+       hw->clk = NULL;
 
 fail_parent_names_copy:
        while (--i >= 0)
@@ -2107,7 +2485,7 @@ EXPORT_SYMBOL_GPL(clk_register);
  */
 static void __clk_release(struct kref *ref)
 {
-       struct clk *clk = container_of(ref, struct clk, ref);
+       struct clk_core *clk = container_of(ref, struct clk_core, ref);
        int i = clk->num_parents;
 
        kfree(clk->parents);
@@ -2165,12 +2543,13 @@ void clk_unregister(struct clk *clk)
        if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
                return;
 
-       clk_debug_unregister(clk);
+       clk_debug_unregister(clk->core);
 
        clk_prepare_lock();
 
-       if (clk->ops == &clk_nodrv_ops) {
-               pr_err("%s: unregistered clock: %s\n", __func__, clk->name);
+       if (clk->core->ops == &clk_nodrv_ops) {
+               pr_err("%s: unregistered clock: %s\n", __func__,
+                      clk->core->name);
                return;
        }
        /*
@@ -2178,24 +2557,25 @@ void clk_unregister(struct clk *clk)
         * a reference to this clock.
         */
        flags = clk_enable_lock();
-       clk->ops = &clk_nodrv_ops;
+       clk->core->ops = &clk_nodrv_ops;
        clk_enable_unlock(flags);
 
-       if (!hlist_empty(&clk->children)) {
-               struct clk *child;
+       if (!hlist_empty(&clk->core->children)) {
+               struct clk_core *child;
                struct hlist_node *t;
 
                /* Reparent all children to the orphan list. */
-               hlist_for_each_entry_safe(child, t, &clk->children, child_node)
-                       clk_set_parent(child, NULL);
+               hlist_for_each_entry_safe(child, t, &clk->core->children,
+                                         child_node)
+                       clk_core_set_parent(child, NULL);
        }
 
-       hlist_del_init(&clk->child_node);
+       hlist_del_init(&clk->core->child_node);
 
-       if (clk->prepare_count)
+       if (clk->core->prepare_count)
                pr_warn("%s: unregistering prepared clock: %s\n",
-                                       __func__, clk->name);
-       kref_put(&clk->ref, __clk_release);
+                                       __func__, clk->core->name);
+       kref_put(&clk->core->ref, __clk_release);
 
        clk_prepare_unlock();
 }
@@ -2263,11 +2643,13 @@ EXPORT_SYMBOL_GPL(devm_clk_unregister);
  */
 int __clk_get(struct clk *clk)
 {
-       if (clk) {
-               if (!try_module_get(clk->owner))
+       struct clk_core *core = !clk ? NULL : clk->core;
+
+       if (core) {
+               if (!try_module_get(core->owner))
                        return 0;
 
-               kref_get(&clk->ref);
+               kref_get(&core->ref);
        }
        return 1;
 }
@@ -2280,11 +2662,20 @@ void __clk_put(struct clk *clk)
                return;
 
        clk_prepare_lock();
-       owner = clk->owner;
-       kref_put(&clk->ref, __clk_release);
+
+       hlist_del(&clk->child_node);
+       if (clk->min_rate > clk->core->req_rate ||
+           clk->max_rate < clk->core->req_rate)
+               clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
+
+       owner = clk->core->owner;
+       kref_put(&clk->core->ref, __clk_release);
+
        clk_prepare_unlock();
 
        module_put(owner);
+
+       kfree(clk);
 }
 
 /***        clk rate change notifiers        ***/
@@ -2339,7 +2730,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
 
        ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
 
-       clk->notifier_count++;
+       clk->core->notifier_count++;
 
 out:
        clk_prepare_unlock();
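Notifier bookkeeping now lives on the shared clk_core while registration still takes the per-user handle. For reference, a hedged sketch of a rate-change notifier a consumer might register; the 200 MHz limit is an assumption:

#include <linux/clk.h>
#include <linux/notifier.h>

/* Veto rate changes above an assumed limit; accept everything else. */
static int example_rate_notify(struct notifier_block *nb,
			       unsigned long event, void *data)
{
	struct clk_notifier_data *cnd = data;

	if (event == PRE_RATE_CHANGE && cnd->new_rate > 200 * 1000 * 1000)
		return NOTIFY_BAD;

	return NOTIFY_OK;
}

static struct notifier_block example_nb = {
	.notifier_call = example_rate_notify,
};

/* A driver would then call: clk_notifier_register(clk, &example_nb); */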
@@ -2376,7 +2767,7 @@ int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
        if (cn->clk == clk) {
                ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
 
-               clk->notifier_count--;
+               clk->core->notifier_count--;
 
                /* XXX the notifier code should handle this better */
                if (!cn->notifier_head.head) {
@@ -2506,7 +2897,8 @@ void of_clk_del_provider(struct device_node *np)
 }
 EXPORT_SYMBOL_GPL(of_clk_del_provider);
 
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
+struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
+                                      const char *dev_id, const char *con_id)
 {
        struct of_clk_provider *provider;
        struct clk *clk = ERR_PTR(-EPROBE_DEFER);
@@ -2515,8 +2907,17 @@ struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec)
        list_for_each_entry(provider, &of_clk_providers, link) {
                if (provider->node == clkspec->np)
                        clk = provider->get(clkspec, provider->data);
-               if (!IS_ERR(clk))
+               if (!IS_ERR(clk)) {
+                       clk = __clk_create_clk(__clk_get_hw(clk), dev_id,
+                                              con_id);
+
+                       if (!IS_ERR(clk) && !__clk_get(clk)) {
+                               __clk_free_clk(clk);
+                               clk = ERR_PTR(-ENOENT);
+                       }
+
                        break;
+               }
        }
 
        return clk;
@@ -2527,7 +2928,7 @@ struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
        struct clk *clk;
 
        mutex_lock(&of_clk_mutex);
-       clk = __of_clk_get_from_provider(clkspec);
+       clk = __of_clk_get_from_provider(clkspec, NULL, __func__);
        mutex_unlock(&of_clk_mutex);
 
        return clk;
index c798138f023f6ae6a38c252c0946135a82814eba..ba845408cc3e8515ef8c9457f686727b57027185 100644 (file)
@@ -9,9 +9,31 @@
  * published by the Free Software Foundation.
  */
 
+struct clk_hw;
+
 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
 struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec);
-struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec);
+struct clk *__of_clk_get_from_provider(struct of_phandle_args *clkspec,
+                                      const char *dev_id, const char *con_id);
 void of_clk_lock(void);
 void of_clk_unlock(void);
 #endif
+
+#ifdef CONFIG_COMMON_CLK
+struct clk *__clk_create_clk(struct clk_hw *hw, const char *dev_id,
+                            const char *con_id);
+void __clk_free_clk(struct clk *clk);
+#else
+/* All these casts to avoid ifdefs in clkdev... */
+static inline struct clk *
+__clk_create_clk(struct clk_hw *hw, const char *dev_id, const char *con_id)
+{
+       return (struct clk *)hw;
+}
+static inline void __clk_free_clk(struct clk *clk) { }
+static inline struct clk_hw *__clk_get_hw(struct clk *clk)

+{
+       return (struct clk_hw *)clk;
+}
+
+#endif
index da4bda8b7fc7e99d24598b041978d5c19bfa52f6..043fd3633373982f0408de1618370c6d63d5887b 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/mutex.h>
 #include <linux/clk.h>
 #include <linux/clkdev.h>
+#include <linux/clk-provider.h>
 #include <linux/of.h>
 
 #include "clk.h"
@@ -28,6 +29,20 @@ static DEFINE_MUTEX(clocks_mutex);
 
 #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK)
 
+static struct clk *__of_clk_get_by_clkspec(struct of_phandle_args *clkspec,
+                                        const char *dev_id, const char *con_id)
+{
+       struct clk *clk;
+
+       if (!clkspec)
+               return ERR_PTR(-EINVAL);
+
+       of_clk_lock();
+       clk = __of_clk_get_from_provider(clkspec, dev_id, con_id);
+       of_clk_unlock();
+       return clk;
+}
+
 /**
  * of_clk_get_by_clkspec() - Lookup a clock form a clock provider
  * @clkspec: pointer to a clock specifier data structure
@@ -38,22 +53,11 @@ static DEFINE_MUTEX(clocks_mutex);
  */
 struct clk *of_clk_get_by_clkspec(struct of_phandle_args *clkspec)
 {
-       struct clk *clk;
-
-       if (!clkspec)
-               return ERR_PTR(-EINVAL);
-
-       of_clk_lock();
-       clk = __of_clk_get_from_provider(clkspec);
-
-       if (!IS_ERR(clk) && !__clk_get(clk))
-               clk = ERR_PTR(-ENOENT);
-
-       of_clk_unlock();
-       return clk;
+       return __of_clk_get_by_clkspec(clkspec, NULL, __func__);
 }
 
-struct clk *of_clk_get(struct device_node *np, int index)
+static struct clk *__of_clk_get(struct device_node *np, int index,
+                              const char *dev_id, const char *con_id)
 {
        struct of_phandle_args clkspec;
        struct clk *clk;
@@ -67,22 +71,21 @@ struct clk *of_clk_get(struct device_node *np, int index)
        if (rc)
                return ERR_PTR(rc);
 
-       clk = of_clk_get_by_clkspec(&clkspec);
+       clk = __of_clk_get_by_clkspec(&clkspec, dev_id, con_id);
        of_node_put(clkspec.np);
+
        return clk;
 }
+
+struct clk *of_clk_get(struct device_node *np, int index)
+{
+       return __of_clk_get(np, index, np->full_name, NULL);
+}
 EXPORT_SYMBOL(of_clk_get);
 
-/**
- * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
- * @np: pointer to clock consumer node
- * @name: name of consumer's clock input, or NULL for the first clock reference
- *
- * This function parses the clocks and clock-names properties,
- * and uses them to look up the struct clk from the registered list of clock
- * providers.
- */
-struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
+static struct clk *__of_clk_get_by_name(struct device_node *np,
+                                       const char *dev_id,
+                                       const char *name)
 {
        struct clk *clk = ERR_PTR(-ENOENT);
 
@@ -97,10 +100,10 @@ struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
                 */
                if (name)
                        index = of_property_match_string(np, "clock-names", name);
-               clk = of_clk_get(np, index);
-               if (!IS_ERR(clk))
+               clk = __of_clk_get(np, index, dev_id, name);
+               if (!IS_ERR(clk)) {
                        break;
-               else if (name && index >= 0) {
+               } else if (name && index >= 0) {
                        if (PTR_ERR(clk) != -EPROBE_DEFER)
                                pr_err("ERROR: could not get clock %s:%s(%i)\n",
                                        np->full_name, name ? name : "", index);
@@ -119,7 +122,33 @@ struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
 
        return clk;
 }
+
+/**
+ * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
+ * @np: pointer to clock consumer node
+ * @name: name of consumer's clock input, or NULL for the first clock reference
+ *
+ * This function parses the clocks and clock-names properties,
+ * and uses them to look up the struct clk from the registered list of clock
+ * providers.
+ */
+struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
+{
+       if (!np)
+               return ERR_PTR(-ENOENT);
+
+       return __of_clk_get_by_name(np, np->full_name, name);
+}
 EXPORT_SYMBOL(of_clk_get_by_name);
+
+#else /* defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) */
+
+static struct clk *__of_clk_get_by_name(struct device_node *np,
+                                       const char *dev_id,
+                                       const char *name)
+{
+       return ERR_PTR(-ENOENT);
+}
 #endif
 
 /*
@@ -168,14 +197,28 @@ static struct clk_lookup *clk_find(const char *dev_id, const char *con_id)
 struct clk *clk_get_sys(const char *dev_id, const char *con_id)
 {
        struct clk_lookup *cl;
+       struct clk *clk = NULL;
 
        mutex_lock(&clocks_mutex);
+
        cl = clk_find(dev_id, con_id);
-       if (cl && !__clk_get(cl->clk))
+       if (!cl)
+               goto out;
+
+       clk = __clk_create_clk(__clk_get_hw(cl->clk), dev_id, con_id);
+       if (IS_ERR(clk))
+               goto out;
+
+       if (!__clk_get(clk)) {
+               __clk_free_clk(clk);
                cl = NULL;
+               goto out;
+       }
+
+out:
        mutex_unlock(&clocks_mutex);
 
-       return cl ? cl->clk : ERR_PTR(-ENOENT);
+       return cl ? clk : ERR_PTR(-ENOENT);
 }
 EXPORT_SYMBOL(clk_get_sys);
 
@@ -185,10 +228,8 @@ struct clk *clk_get(struct device *dev, const char *con_id)
        struct clk *clk;
 
        if (dev) {
-               clk = of_clk_get_by_name(dev->of_node, con_id);
-               if (!IS_ERR(clk))
-                       return clk;
-               if (PTR_ERR(clk) == -EPROBE_DEFER)
+               clk = __of_clk_get_by_name(dev->of_node, dev_id, con_id);
+               if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER)
                        return clk;
        }
 
@@ -331,6 +372,7 @@ int clk_register_clkdev(struct clk *clk, const char *con_id,
 
        return 0;
 }
+EXPORT_SYMBOL(clk_register_clkdev);
 
 /**
  * clk_register_clkdevs - register a set of clk_lookup for a struct clk
index 007144f81f50b63301f211103dd200f7f21a4ec4..2e4f6d432bebeb21b6c543494764320407b9a488 100644 (file)
@@ -295,6 +295,8 @@ static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw,
 }
 
 static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
                              unsigned long *best_parent_rate,
                              struct clk_hw **best_parent_p)
 {
index 48fa53c7ce5e0dbf4cfb7f5eb30abe0104015ef8..de6a873175d2b833f64d95f8e2bf5729da6fd1b4 100644 (file)
@@ -202,6 +202,8 @@ error:
 }
 
 static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
 {
index 38e9153446059a1fb0c0728c841517cf99747f40..38e37bf6b821d8c6207c73179c36f98977933578 100644 (file)
@@ -1,3 +1,4 @@
 obj-y                          += clk-pxa.o
 obj-$(CONFIG_PXA25x)           += clk-pxa25x.o
 obj-$(CONFIG_PXA27x)           += clk-pxa27x.o
+obj-$(CONFIG_PXA3xx)           += clk-pxa3xx.o
index 4e834753ab094500677811702c37ad005503929b..29cee9e8d4d91bf34312207b7016e2f05c7d2746 100644 (file)
@@ -46,7 +46,7 @@ static unsigned long cken_recalc_rate(struct clk_hw *hw,
                fix = &pclk->lp;
        else
                fix = &pclk->hp;
-       fix->hw.clk = hw->clk;
+       __clk_hw_set_clk(&fix->hw, hw);
        return clk_fixed_factor_ops.recalc_rate(&fix->hw, parent_rate);
 }
 
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
new file mode 100644 (file)
index 0000000..39f891b
--- /dev/null
@@ -0,0 +1,364 @@
+/*
+ * Marvell PXA3xxx family clocks
+ *
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * Heavily inspired from former arch/arm/mach-pxa/pxa3xx.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * For non-devicetree platforms. Once pxa is fully converted to devicetree, this
+ * should go away.
+ */
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <mach/smemc.h>
+#include <mach/pxa3xx-regs.h>
+
+#include <dt-bindings/clock/pxa-clock.h>
+#include "clk-pxa.h"
+
+#define KHz 1000
+#define MHz (1000 * 1000)
+
+enum {
+       PXA_CORE_60Mhz = 0,
+       PXA_CORE_RUN,
+       PXA_CORE_TURBO,
+};
+
+enum {
+       PXA_BUS_60Mhz = 0,
+       PXA_BUS_HSS,
+};
+
+/* crystal frequency to HSIO bus frequency multiplier (HSS) */
+static unsigned char hss_mult[4] = { 8, 12, 16, 24 };
+
+/* crystal frequency to static memory controller multiplier (SMCFS) */
+static unsigned int smcfs_mult[8] = { 6, 0, 8, 0, 0, 16, };
+static unsigned int df_clkdiv[4] = { 1, 2, 4, 1 };
+
+static const char * const get_freq_khz[] = {
+       "core", "ring_osc_60mhz", "run", "cpll", "system_bus"
+};
+
+/*
+ * Get the clock frequency as reflected by ACSR and the turbo flag.
+ * We assume these values have been applied via a fcs.
+ * If info is not 0 we also display the current settings.
+ */
+unsigned int pxa3xx_get_clk_frequency_khz(int info)
+{
+       struct clk *clk;
+       unsigned long clks[5];
+       int i;
+
+       for (i = 0; i < 5; i++) {
+               clk = clk_get(NULL, get_freq_khz[i]);
+               if (IS_ERR(clk)) {
+                       clks[i] = 0;
+               } else {
+                       clks[i] = clk_get_rate(clk);
+                       clk_put(clk);
+               }
+       }
+       if (info) {
+               pr_info("RO Mode clock: %ld.%02ldMHz\n",
+                       clks[1] / 1000000, (clks[1] % 1000000) / 10000);
+               pr_info("Run Mode clock: %ld.%02ldMHz\n",
+                       clks[2] / 1000000, (clks[2] % 1000000) / 10000);
+               pr_info("Turbo Mode clock: %ld.%02ldMHz\n",
+                       clks[3] / 1000000, (clks[3] % 1000000) / 10000);
+               pr_info("System bus clock: %ld.%02ldMHz\n",
+                       clks[4] / 1000000, (clks[4] % 1000000) / 10000);
+       }
+       return (unsigned int)clks[0];
+}
+
+static unsigned long clk_pxa3xx_ac97_get_rate(struct clk_hw *hw,
+                                            unsigned long parent_rate)
+{
+       unsigned long ac97_div, rate;
+
+       ac97_div = AC97_DIV;
+
+       /* This may lose precision for some rates but won't for the
+        * standard 24.576MHz.
+        */
+       rate = parent_rate / 2;
+       rate /= ((ac97_div >> 12) & 0x7fff);
+       rate *= (ac97_div & 0xfff);
+
+       return rate;
+}
+PARENTS(clk_pxa3xx_ac97) = { "spll_624mhz" };
+RATE_RO_OPS(clk_pxa3xx_ac97, "ac97");
+
+static unsigned long clk_pxa3xx_smemc_get_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       unsigned long acsr = ACSR;
+       unsigned long memclkcfg = __raw_readl(MEMCLKCFG);
+
+       return (parent_rate / 48)  * smcfs_mult[(acsr >> 23) & 0x7] /
+               df_clkdiv[(memclkcfg >> 16) & 0x3];
+}
+PARENTS(clk_pxa3xx_smemc) = { "spll_624mhz" };
+RATE_RO_OPS(clk_pxa3xx_smemc, "smemc");
+
+static bool pxa3xx_is_ring_osc_forced(void)
+{
+       unsigned long acsr = ACSR;
+
+       return acsr & ACCR_D0CS;
+}
+
+PARENTS(pxa3xx_pbus) = { "ring_osc_60mhz", "spll_624mhz" };
+PARENTS(pxa3xx_32Khz_bus) = { "osc_32_768khz", "osc_32_768khz" };
+PARENTS(pxa3xx_13MHz_bus) = { "osc_13mhz", "osc_13mhz" };
+PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
+PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
+PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
+
+#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB)
+#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
+                   div_hp, bit, is_lp, flags)                          \
+       PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp,         \
+                mult_hp, div_hp, is_lp,  CKEN_AB(bit),                 \
+                (CKEN_ ## bit % 32), flags)
+#define PXA3XX_PBUS_CKEN(dev_id, con_id, bit, mult_lp, div_lp,         \
+                        mult_hp, div_hp, delay)                        \
+       PXA3XX_CKEN(dev_id, con_id, pxa3xx_pbus_parents, mult_lp,       \
+                   div_lp, mult_hp, div_hp, bit, pxa3xx_is_ring_osc_forced, 0)
+#define PXA3XX_CKEN_1RATE(dev_id, con_id, bit, parents)                        \
+       PXA_CKEN_1RATE(dev_id, con_id, bit, parents,                    \
+                      CKEN_AB(bit), (CKEN_ ## bit % 32), 0)
+
+static struct desc_clk_cken pxa3xx_clocks[] __initdata = {
+       PXA3XX_PBUS_CKEN("pxa2xx-uart.0", NULL, FFUART, 1, 4, 1, 42, 1),
+       PXA3XX_PBUS_CKEN("pxa2xx-uart.1", NULL, BTUART, 1, 4, 1, 42, 1),
+       PXA3XX_PBUS_CKEN("pxa2xx-uart.2", NULL, STUART, 1, 4, 1, 42, 1),
+       PXA3XX_PBUS_CKEN("pxa2xx-i2c.0", NULL, I2C, 2, 5, 1, 19, 0),
+       PXA3XX_PBUS_CKEN("pxa27x-udc", NULL, UDC, 1, 4, 1, 13, 5),
+       PXA3XX_PBUS_CKEN("pxa27x-ohci", NULL, USBH, 1, 4, 1, 13, 0),
+       PXA3XX_PBUS_CKEN("pxa3xx-u2d", NULL, USB2, 1, 4, 1, 13, 0),
+       PXA3XX_PBUS_CKEN("pxa27x-pwm.0", NULL, PWM0, 1, 6, 1, 48, 0),
+       PXA3XX_PBUS_CKEN("pxa27x-pwm.1", NULL, PWM1, 1, 6, 1, 48, 0),
+       PXA3XX_PBUS_CKEN("pxa2xx-mci.0", NULL, MMC1, 1, 4, 1, 24, 0),
+       PXA3XX_PBUS_CKEN("pxa2xx-mci.1", NULL, MMC2, 1, 4, 1, 24, 0),
+       PXA3XX_PBUS_CKEN("pxa2xx-mci.2", NULL, MMC3, 1, 4, 1, 24, 0),
+
+       PXA3XX_CKEN_1RATE("pxa27x-keypad", NULL, KEYPAD,
+                         pxa3xx_32Khz_bus_parents),
+       PXA3XX_CKEN_1RATE("pxa3xx-ssp.0", NULL, SSP1, pxa3xx_13MHz_bus_parents),
+       PXA3XX_CKEN_1RATE("pxa3xx-ssp.1", NULL, SSP2, pxa3xx_13MHz_bus_parents),
+       PXA3XX_CKEN_1RATE("pxa3xx-ssp.2", NULL, SSP3, pxa3xx_13MHz_bus_parents),
+       PXA3XX_CKEN_1RATE("pxa3xx-ssp.3", NULL, SSP4, pxa3xx_13MHz_bus_parents),
+
+       PXA3XX_CKEN(NULL, "AC97CLK", pxa3xx_ac97_bus_parents, 1, 4, 1, 1, AC97,
+                   pxa3xx_is_ring_osc_forced, 0),
+       PXA3XX_CKEN(NULL, "CAMCLK", pxa3xx_sbus_parents, 1, 2, 1, 1, CAMERA,
+                   pxa3xx_is_ring_osc_forced, 0),
+       PXA3XX_CKEN("pxa2xx-fb", NULL, pxa3xx_sbus_parents, 1, 1, 1, 1, LCD,
+                   pxa3xx_is_ring_osc_forced, 0),
+       PXA3XX_CKEN("pxa2xx-pcmcia", NULL, pxa3xx_smemcbus_parents, 1, 4,
+                   1, 1, SMC, pxa3xx_is_ring_osc_forced, CLK_IGNORE_UNUSED),
+};
+
+static struct desc_clk_cken pxa300_310_clocks[] __initdata = {
+
+       PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA300_GCU, 1, 1, 1, 1, 0),
+       PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 4, 0),
+       PXA3XX_CKEN_1RATE("pxa3xx-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
+};
+
+static struct desc_clk_cken pxa320_clocks[] __initdata = {
+       PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 6, 0),
+       PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA320_GCU, 1, 1, 1, 1, 0),
+       PXA3XX_CKEN_1RATE("pxa3xx-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
+};
+
+static struct desc_clk_cken pxa93x_clocks[] __initdata = {
+
+       PXA3XX_PBUS_CKEN("pxa3xx-gcu", NULL, PXA300_GCU, 1, 1, 1, 1, 0),
+       PXA3XX_PBUS_CKEN("pxa3xx-nand", NULL, NAND, 1, 2, 1, 4, 0),
+       PXA3XX_CKEN_1RATE("pxa93x-gpio", NULL, GPIO, pxa3xx_13MHz_bus_parents),
+};
+
+static unsigned long clk_pxa3xx_system_bus_get_rate(struct clk_hw *hw,
+                                           unsigned long parent_rate)
+{
+       unsigned long acsr = ACSR;
+       unsigned int hss = (acsr >> 14) & 0x3;
+
+       if (pxa3xx_is_ring_osc_forced())
+               return parent_rate;
+       return parent_rate / 48 * hss_mult[hss];
+}
+
+static u8 clk_pxa3xx_system_bus_get_parent(struct clk_hw *hw)
+{
+       if (pxa3xx_is_ring_osc_forced())
+               return PXA_BUS_60Mhz;
+       else
+               return PXA_BUS_HSS;
+}
+
+PARENTS(clk_pxa3xx_system_bus) = { "ring_osc_60mhz", "spll_624mhz" };
+MUX_RO_RATE_RO_OPS(clk_pxa3xx_system_bus, "system_bus");
+
+static unsigned long clk_pxa3xx_core_get_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       return parent_rate;
+}
+
+static u8 clk_pxa3xx_core_get_parent(struct clk_hw *hw)
+{
+       unsigned long xclkcfg;
+       unsigned int t;
+
+       if (pxa3xx_is_ring_osc_forced())
+               return PXA_CORE_60Mhz;
+
+       /* Read XCLKCFG register turbo bit */
+       __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
+       t = xclkcfg & 0x1;
+
+       if (t)
+               return PXA_CORE_TURBO;
+       return PXA_CORE_RUN;
+}
+PARENTS(clk_pxa3xx_core) = { "ring_osc_60mhz", "run", "cpll" };
+MUX_RO_RATE_RO_OPS(clk_pxa3xx_core, "core");
+
+static unsigned long clk_pxa3xx_run_get_rate(struct clk_hw *hw,
+                                            unsigned long parent_rate)
+{
+       unsigned long acsr = ACSR;
+       unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
+       unsigned int t, xclkcfg;
+
+       /* Read XCLKCFG register turbo bit */
+       __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
+       t = xclkcfg & 0x1;
+
+       return t ? (parent_rate / xn) * 2 : parent_rate;
+}
+PARENTS(clk_pxa3xx_run) = { "cpll" };
+RATE_RO_OPS(clk_pxa3xx_run, "run");
+
+static unsigned long clk_pxa3xx_cpll_get_rate(struct clk_hw *hw,
+       unsigned long parent_rate)
+{
+       unsigned long acsr = ACSR;
+       unsigned int xn = (acsr & ACCR_XN_MASK) >> 8;
+       unsigned int xl = acsr & ACCR_XL_MASK;
+       unsigned int t, xclkcfg;
+
+       /* Read XCLKCFG register turbo bit */
+       __asm__ __volatile__("mrc\tp14, 0, %0, c6, c0, 0" : "=r"(xclkcfg));
+       t = xclkcfg & 0x1;
+
+       pr_info("RJK: parent_rate=%lu, xl=%u, xn=%u\n", parent_rate, xl, xn);
+       return t ? parent_rate * xl * xn : parent_rate * xl;
+}
+PARENTS(clk_pxa3xx_cpll) = { "osc_13mhz" };
+RATE_RO_OPS(clk_pxa3xx_cpll, "cpll");
+
+static void __init pxa3xx_register_core(void)
+{
+       clk_register_clk_pxa3xx_cpll();
+       clk_register_clk_pxa3xx_run();
+
+       clkdev_pxa_register(CLK_CORE, "core", NULL,
+                           clk_register_clk_pxa3xx_core());
+}
+
+static void __init pxa3xx_register_plls(void)
+{
+       clk_register_fixed_rate(NULL, "osc_13mhz", NULL,
+                               CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+                               13 * MHz);
+       clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+                               CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+                               32768);
+       clk_register_fixed_rate(NULL, "ring_osc_120mhz", NULL,
+                               CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+                               120 * MHz);
+       clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
+       clk_register_fixed_factor(NULL, "spll_624mhz", "osc_13mhz", 0, 48, 1);
+       clk_register_fixed_factor(NULL, "ring_osc_60mhz", "ring_osc_120mhz",
+                                 0, 1, 2);
+}
+
+#define DUMMY_CLK(_con_id, _dev_id, _parent) \
+       { .con_id = _con_id, .dev_id = _dev_id, .parent = _parent }
+struct dummy_clk {
+       const char *con_id;
+       const char *dev_id;
+       const char *parent;
+};
+static struct dummy_clk dummy_clks[] __initdata = {
+       DUMMY_CLK(NULL, "pxa93x-gpio", "osc_13mhz"),
+       DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
+       DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
+       DUMMY_CLK(NULL, "pxa3xx-pwri2c.1", "osc_13mhz"),
+};
+
+static void __init pxa3xx_dummy_clocks_init(void)
+{
+       struct clk *clk;
+       struct dummy_clk *d;
+       const char *name;
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(dummy_clks); i++) {
+               d = &dummy_clks[i];
+               name = d->dev_id ? d->dev_id : d->con_id;
+               clk = clk_register_fixed_factor(NULL, name, d->parent, 0, 1, 1);
+               clk_register_clkdev(clk, d->con_id, d->dev_id);
+       }
+}
+
+static void __init pxa3xx_base_clocks_init(void)
+{
+       pxa3xx_register_plls();
+       pxa3xx_register_core();
+       clk_register_clk_pxa3xx_system_bus();
+       clk_register_clk_pxa3xx_ac97();
+       clk_register_clk_pxa3xx_smemc();
+       clk_register_gate(NULL, "CLK_POUT", "osc_13mhz", 0,
+                         (void __iomem *)&OSCC, 11, 0, NULL);
+}
+
+int __init pxa3xx_clocks_init(void)
+{
+       int ret;
+
+       pxa3xx_base_clocks_init();
+       pxa3xx_dummy_clocks_init();
+       ret = clk_pxa_cken_init(pxa3xx_clocks, ARRAY_SIZE(pxa3xx_clocks));
+       if (ret)
+               return ret;
+       if (cpu_is_pxa320())
+               return clk_pxa_cken_init(pxa320_clocks,
+                                        ARRAY_SIZE(pxa320_clocks));
+       if (cpu_is_pxa300() || cpu_is_pxa310())
+               return clk_pxa_cken_init(pxa300_310_clocks,
+                                        ARRAY_SIZE(pxa300_310_clocks));
+       return clk_pxa_cken_init(pxa93x_clocks, ARRAY_SIZE(pxa93x_clocks));
+}
+
+static void __init pxa3xx_dt_clocks_init(struct device_node *np)
+{
+       pxa3xx_clocks_init();
+       clk_pxa_dt_common_init(np);
+}
+CLK_OF_DECLARE(pxa_clks, "marvell,pxa300-clocks", pxa3xx_dt_clocks_init);
index 1107351ed34682e7cd8fa04ffcd61c0e4bc146f9..0d7ab52b7ab0076fec09340410c09bca0efe3b4f 100644 (file)
@@ -29,6 +29,15 @@ config IPQ_GCC_806X
          Say Y if you want to use peripheral devices such as UART, SPI,
          i2c, USB, SD/eMMC, etc.
 
+config IPQ_LCC_806X
+       tristate "IPQ806x LPASS Clock Controller"
+       select IPQ_GCC_806X
+       depends on COMMON_CLK_QCOM
+       help
+         Support for the LPASS clock controller on ipq806x devices.
+         Say Y if you want to use audio devices such as i2s, pcm,
+         S/PDIF, etc.
+
 config MSM_GCC_8660
        tristate "MSM8660 Global Clock Controller"
        depends on COMMON_CLK_QCOM
@@ -45,6 +54,15 @@ config MSM_GCC_8960
          Say Y if you want to use peripheral devices such as UART, SPI,
          i2c, USB, SD/eMMC, SATA, PCIe, etc.
 
+config MSM_LCC_8960
+       tristate "APQ8064/MSM8960 LPASS Clock Controller"
+       select MSM_GCC_8960
+       depends on COMMON_CLK_QCOM
+       help
+         Support for the LPASS clock controller on apq8064/msm8960 devices.
+         Say Y if you want to use audio devices such as i2s, pcm,
+         SLIMBus, etc.
+
 config MSM_MMCC_8960
        tristate "MSM8960 Multimedia Clock Controller"
        select MSM_GCC_8960
index 783cfb24faa41cbbac81e676d5ed129733529aac..61782646959534bec2b19d445762abbec7fb1591 100644 (file)
@@ -6,13 +6,17 @@ clk-qcom-y += clk-pll.o
 clk-qcom-y += clk-rcg.o
 clk-qcom-y += clk-rcg2.o
 clk-qcom-y += clk-branch.o
+clk-qcom-y += clk-regmap-divider.o
+clk-qcom-y += clk-regmap-mux.o
 clk-qcom-y += reset.o
 
 obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
 obj-$(CONFIG_APQ_MMCC_8084) += mmcc-apq8084.o
 obj-$(CONFIG_IPQ_GCC_806X) += gcc-ipq806x.o
+obj-$(CONFIG_IPQ_LCC_806X) += lcc-ipq806x.o
 obj-$(CONFIG_MSM_GCC_8660) += gcc-msm8660.o
 obj-$(CONFIG_MSM_GCC_8960) += gcc-msm8960.o
+obj-$(CONFIG_MSM_LCC_8960) += lcc-msm8960.o
 obj-$(CONFIG_MSM_GCC_8974) += gcc-msm8974.o
 obj-$(CONFIG_MSM_MMCC_8960) += mmcc-msm8960.o
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
index 60873a7f45d94b3687bb342b86dd7bbfb9db67df..b4325f65a1bf6f225811936c9a00927391c62787 100644 (file)
@@ -141,6 +141,7 @@ struct pll_freq_tbl *find_freq(const struct pll_freq_tbl *f, unsigned long rate)
 
 static long
 clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate,
+                      unsigned long min_rate, unsigned long max_rate,
                       unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_pll *pll = to_clk_pll(hw);
index 0b93972c8807f11ef5e27bf6eb9f14e85b9f1055..0039bd7d3965370108bba619abdf40009495e693 100644 (file)
@@ -368,6 +368,7 @@ clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 
 static long _freq_tbl_determine_rate(struct clk_hw *hw,
                const struct freq_tbl *f, unsigned long rate,
+               unsigned long min_rate, unsigned long max_rate,
                unsigned long *p_rate, struct clk_hw **p_hw)
 {
        unsigned long clk_flags;
@@ -397,22 +398,27 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
 }
 
 static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long min_rate, unsigned long max_rate,
                unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg *rcg = to_clk_rcg(hw);
 
-       return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+       return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
+                       max_rate, p_rate, p);
 }
 
 static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long min_rate, unsigned long max_rate,
                unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
 
-       return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, p_rate, p);
+       return _freq_tbl_determine_rate(hw, rcg->freq_tbl, rate, min_rate,
+                       max_rate, p_rate, p);
 }
 
 static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long min_rate, unsigned long max_rate,
                unsigned long *p_rate, struct clk_hw **p_hw)
 {
        struct clk_rcg *rcg = to_clk_rcg(hw);
index 08b8b3729f539ee769f15d1f474e8c3718d1e640..742acfa18d63798c19c25884ef2b50d508965858 100644 (file)
@@ -208,6 +208,7 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
 }
 
 static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long min_rate, unsigned long max_rate,
                unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -361,6 +362,8 @@ static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
 }
 
 static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long min_rate,
+                                unsigned long max_rate,
                                 unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -412,6 +415,7 @@ const struct clk_ops clk_edp_pixel_ops = {
 EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
 
 static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate,
+                        unsigned long min_rate, unsigned long max_rate,
                         unsigned long *p_rate, struct clk_hw **p_hw)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
@@ -476,6 +480,8 @@ static const struct frac_entry frac_table_pixel[] = {
 };
 
 static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long min_rate,
+                                unsigned long max_rate,
                                 unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
diff --git a/drivers/clk/qcom/clk-regmap-divider.c b/drivers/clk/qcom/clk-regmap-divider.c
new file mode 100644 (file)
index 0000000..5348491
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/export.h>
+
+#include "clk-regmap-divider.h"
+
+static inline struct clk_regmap_div *to_clk_regmap_div(struct clk_hw *hw)
+{
+       return container_of(to_clk_regmap(hw), struct clk_regmap_div, clkr);
+}
+
+static long div_round_rate(struct clk_hw *hw, unsigned long rate,
+                          unsigned long *prate)
+{
+       struct clk_regmap_div *divider = to_clk_regmap_div(hw);
+
+       return divider_round_rate(hw, rate, prate, NULL, divider->width,
+                                 CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+static int div_set_rate(struct clk_hw *hw, unsigned long rate,
+                       unsigned long parent_rate)
+{
+       struct clk_regmap_div *divider = to_clk_regmap_div(hw);
+       struct clk_regmap *clkr = &divider->clkr;
+       u32 div;
+
+       div = divider_get_val(rate, parent_rate, NULL, divider->width,
+                             CLK_DIVIDER_ROUND_CLOSEST);
+
+       return regmap_update_bits(clkr->regmap, divider->reg,
+                                 (BIT(divider->width) - 1) << divider->shift,
+                                 div << divider->shift);
+}
+
+static unsigned long div_recalc_rate(struct clk_hw *hw,
+                                    unsigned long parent_rate)
+{
+       struct clk_regmap_div *divider = to_clk_regmap_div(hw);
+       struct clk_regmap *clkr = &divider->clkr;
+       u32 div;
+
+       regmap_read(clkr->regmap, divider->reg, &div);
+       div >>= divider->shift;
+       div &= BIT(divider->width) - 1;
+
+       return divider_recalc_rate(hw, parent_rate, div, NULL,
+                                  CLK_DIVIDER_ROUND_CLOSEST);
+}
+
+const struct clk_ops clk_regmap_div_ops = {
+       .round_rate = div_round_rate,
+       .set_rate = div_set_rate,
+       .recalc_rate = div_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_div_ops);
diff --git a/drivers/clk/qcom/clk-regmap-divider.h b/drivers/clk/qcom/clk-regmap-divider.h
new file mode 100644 (file)
index 0000000..fc4492e
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_DIVIDER_H__
+#define __QCOM_CLK_REGMAP_DIVIDER_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+struct clk_regmap_div {
+       u32                     reg;
+       u32                     shift;
+       u32                     width;
+       struct clk_regmap       clkr;
+};
+
+extern const struct clk_ops clk_regmap_div_ops;
+
+#endif
diff --git a/drivers/clk/qcom/clk-regmap-mux.c b/drivers/clk/qcom/clk-regmap-mux.c
new file mode 100644 (file)
index 0000000..cae3071
--- /dev/null
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/regmap.h>
+#include <linux/export.h>
+
+#include "clk-regmap-mux.h"
+
+static inline struct clk_regmap_mux *to_clk_regmap_mux(struct clk_hw *hw)
+{
+       return container_of(to_clk_regmap(hw), struct clk_regmap_mux, clkr);
+}
+
+static u8 mux_get_parent(struct clk_hw *hw)
+{
+       struct clk_regmap_mux *mux = to_clk_regmap_mux(hw);
+       struct clk_regmap *clkr = to_clk_regmap(hw);
+       unsigned int mask = GENMASK(mux->width - 1, 0);
+       unsigned int val;
+
+       regmap_read(clkr->regmap, mux->reg, &val);
+
+       val >>= mux->shift;
+       val &= mask;
+
+       return val;
+}
+
+static int mux_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct clk_regmap_mux *mux = to_clk_regmap_mux(hw);
+       struct clk_regmap *clkr = to_clk_regmap(hw);
+       unsigned int mask = GENMASK(mux->width + mux->shift - 1, mux->shift);
+       unsigned int val;
+
+       val = index;
+       val <<= mux->shift;
+
+       return regmap_update_bits(clkr->regmap, mux->reg, mask, val);
+}
+
+const struct clk_ops clk_regmap_mux_closest_ops = {
+       .get_parent = mux_get_parent,
+       .set_parent = mux_set_parent,
+       .determine_rate = __clk_mux_determine_rate_closest,
+};
+EXPORT_SYMBOL_GPL(clk_regmap_mux_closest_ops);
diff --git a/drivers/clk/qcom/clk-regmap-mux.h b/drivers/clk/qcom/clk-regmap-mux.h
new file mode 100644 (file)
index 0000000..5cec761
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CLK_REGMAP_MUX_H__
+#define __QCOM_CLK_REGMAP_MUX_H__
+
+#include <linux/clk-provider.h>
+#include "clk-regmap.h"
+
+struct clk_regmap_mux {
+       u32                     reg;
+       u32                     shift;
+       u32                     width;
+       struct clk_regmap       clkr;
+};
+
+extern const struct clk_ops clk_regmap_mux_closest_ops;
+
+#endif
index afed5eb0691e101c246275b1b6a80d44de9d1eed..cbdc31dea7f4311d091b938959179b3dd97e0fbe 100644 (file)
@@ -75,6 +75,17 @@ static struct clk_pll pll3 = {
        },
 };
 
+static struct clk_regmap pll4_vote = {
+       .enable_reg = 0x34c0,
+       .enable_mask = BIT(4),
+       .hw.init = &(struct clk_init_data){
+               .name = "pll4_vote",
+               .parent_names = (const char *[]){ "pll4" },
+               .num_parents = 1,
+               .ops = &clk_pll_vote_ops,
+       },
+};
+
 static struct clk_pll pll8 = {
        .l_reg = 0x3144,
        .m_reg = 0x3148,
@@ -2163,6 +2174,7 @@ static struct clk_regmap *gcc_ipq806x_clks[] = {
        [PLL0] = &pll0.clkr,
        [PLL0_VOTE] = &pll0_vote,
        [PLL3] = &pll3.clkr,
+       [PLL4_VOTE] = &pll4_vote,
        [PLL8] = &pll8.clkr,
        [PLL8_VOTE] = &pll8_vote,
        [PLL14] = &pll14.clkr,
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
new file mode 100644 (file)
index 0000000..121ffde
--- /dev/null
@@ -0,0 +1,473 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lcc-ipq806x.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+
+static struct clk_pll pll4 = {
+       .l_reg = 0x4,
+       .m_reg = 0x8,
+       .n_reg = 0xc,
+       .config_reg = 0x14,
+       .mode_reg = 0x0,
+       .status_reg = 0x18,
+       .status_bit = 16,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pll4",
+               .parent_names = (const char *[]){ "pxo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
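+/*
+ * With the usual 25 MHz pxo reference this programs PLL4 to roughly
+ * pxo * (L + M/N) = 25 MHz * (15 + 145/199) ~= 393.216 MHz, the rate
+ * the audio frequency tables below assume.
+ */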
+static const struct pll_config pll4_config = {
+       .l = 0xf,
+       .m = 0x91,
+       .n = 0xc7,
+       .vco_val = 0x0,
+       .vco_mask = BIT(17) | BIT(16),
+       .pre_div_val = 0x0,
+       .pre_div_mask = BIT(19),
+       .post_div_val = 0x0,
+       .post_div_mask = BIT(21) | BIT(20),
+       .mn_ena_mask = BIT(22),
+       .main_output_mask = BIT(23),
+};
+
+#define P_PXO  0
+#define P_PLL4 1
+
+static const u8 lcc_pxo_pll4_map[] = {
+       [P_PXO]         = 0,
+       [P_PLL4]        = 2,
+};
+
+static const char *lcc_pxo_pll4[] = {
+       "pxo",
+       "pll4_vote",
+};
+
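+/*
+ * Rows are { rate, src, pre_div, m, n }; e.g. the 12288000 Hz entry
+ * follows from 393216000 / 4 * 1 / 8, assuming PLL4 runs at roughly
+ * 393.216 MHz as configured above.
+ */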
+static struct freq_tbl clk_tbl_aif_mi2s[] = {
+       {  1024000, P_PLL4, 4,  1,  96 },
+       {  1411200, P_PLL4, 4,  2, 139 },
+       {  1536000, P_PLL4, 4,  1,  64 },
+       {  2048000, P_PLL4, 4,  1,  48 },
+       {  2116800, P_PLL4, 4,  2,  93 },
+       {  2304000, P_PLL4, 4,  2,  85 },
+       {  2822400, P_PLL4, 4,  6, 209 },
+       {  3072000, P_PLL4, 4,  1,  32 },
+       {  3175200, P_PLL4, 4,  1,  31 },
+       {  4096000, P_PLL4, 4,  1,  24 },
+       {  4233600, P_PLL4, 4,  9, 209 },
+       {  4608000, P_PLL4, 4,  3,  64 },
+       {  5644800, P_PLL4, 4, 12, 209 },
+       {  6144000, P_PLL4, 4,  1,  16 },
+       {  6350400, P_PLL4, 4,  2,  31 },
+       {  8192000, P_PLL4, 4,  1,  12 },
+       {  8467200, P_PLL4, 4, 18, 209 },
+       {  9216000, P_PLL4, 4,  3,  32 },
+       { 11289600, P_PLL4, 4, 24, 209 },
+       { 12288000, P_PLL4, 4,  1,   8 },
+       { 12700800, P_PLL4, 4, 27, 209 },
+       { 13824000, P_PLL4, 4,  9,  64 },
+       { 16384000, P_PLL4, 4,  1,   6 },
+       { 16934400, P_PLL4, 4, 41, 238 },
+       { 18432000, P_PLL4, 4,  3,  16 },
+       { 22579200, P_PLL4, 2, 24, 209 },
+       { 24576000, P_PLL4, 4,  1,   4 },
+       { 27648000, P_PLL4, 4,  9,  32 },
+       { 33868800, P_PLL4, 4, 41, 119 },
+       { 36864000, P_PLL4, 4,  3,   8 },
+       { 45158400, P_PLL4, 1, 24, 209 },
+       { 49152000, P_PLL4, 4,  1,   2 },
+       { 50803200, P_PLL4, 1, 27, 209 },
+       { }
+};
+
+static struct clk_rcg mi2s_osr_src = {
+       .ns_reg = 0x48,
+       .md_reg = 0x4c,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 24,
+               .m_val_shift = 8,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_aif_mi2s,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_osr_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static const char *lcc_mi2s_parents[] = {
+       "mi2s_osr_src",
+};
+
+static struct clk_branch mi2s_osr_clk = {
+       .halt_reg = 0x50,
+       .halt_bit = 1,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(17),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_osr_clk",
+                       .parent_names = lcc_mi2s_parents,
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_div mi2s_div_clk = {
+       .reg = 0x48,
+       .shift = 10,
+       .width = 4,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_div_clk",
+                       .parent_names = lcc_mi2s_parents,
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+               },
+       },
+};
+
+static struct clk_branch mi2s_bit_div_clk = {
+       .halt_reg = 0x50,
+       .halt_bit = 0,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(15),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_bit_div_clk",
+                       .parent_names = (const char *[]){ "mi2s_div_clk" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_mux mi2s_bit_clk = {
+       .reg = 0x48,
+       .shift = 14,
+       .width = 1,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_bit_clk",
+                       .parent_names = (const char *[]){
+                               "mi2s_bit_div_clk",
+                               "mi2s_codec_clk",
+                       },
+                       .num_parents = 2,
+                       .ops = &clk_regmap_mux_closest_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct freq_tbl clk_tbl_pcm[] = {
+       {   64000, P_PLL4, 4, 1, 1536 },
+       {  128000, P_PLL4, 4, 1,  768 },
+       {  256000, P_PLL4, 4, 1,  384 },
+       {  512000, P_PLL4, 4, 1,  192 },
+       { 1024000, P_PLL4, 4, 1,   96 },
+       { 2048000, P_PLL4, 4, 1,   48 },
+       { },
+};
+
+static struct clk_rcg pcm_src = {
+       .ns_reg = 0x54,
+       .md_reg = 0x58,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 16,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_pcm,
+       .clkr = {
+               .enable_reg = 0x54,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_branch pcm_clk_out = {
+       .halt_reg = 0x5c,
+       .halt_bit = 0,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x54,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_clk_out",
+                       .parent_names = (const char *[]){ "pcm_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_mux pcm_clk = {
+       .reg = 0x54,
+       .shift = 10,
+       .width = 1,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_clk",
+                       .parent_names = (const char *[]){
+                               "pcm_clk_out",
+                               "pcm_codec_clk",
+                       },
+                       .num_parents = 2,
+                       .ops = &clk_regmap_mux_closest_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct freq_tbl clk_tbl_aif_osr[] = {
+       {  22050, P_PLL4, 1, 147, 20480 },
+       {  32000, P_PLL4, 1,   1,    96 },
+       {  44100, P_PLL4, 1, 147, 10240 },
+       {  48000, P_PLL4, 1,   1,    64 },
+       {  88200, P_PLL4, 1, 147,  5120 },
+       {  96000, P_PLL4, 1,   1,    32 },
+       { 176400, P_PLL4, 1, 147,  2560 },
+       { 192000, P_PLL4, 1,   1,    16 },
+       { },
+};
+
+static struct clk_rcg spdif_src = {
+       .ns_reg = 0xcc,
+       .md_reg = 0xd0,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_aif_osr,
+       .clkr = {
+               .enable_reg = 0xcc,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "spdif_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static const char *lcc_spdif_parents[] = {
+       "spdif_src",
+};
+
+static struct clk_branch spdif_clk = {
+       .halt_reg = 0xd4,
+       .halt_bit = 1,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0xcc,
+               .enable_mask = BIT(12),
+               .hw.init = &(struct clk_init_data){
+                       .name = "spdif_clk",
+                       .parent_names = lcc_spdif_parents,
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct freq_tbl clk_tbl_ahbix[] = {
+       { 131072000, P_PLL4, 1, 1, 3 },
+       { },
+};
+
+static struct clk_rcg ahbix_clk = {
+       .ns_reg = 0x38,
+       .md_reg = 0x3c,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 24,
+               .m_val_shift = 8,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_ahbix,
+       .clkr = {
+               .enable_reg = 0x38,
+               .enable_mask = BIT(10), /* toggle the gfmux to select mn/pxo */
+               .hw.init = &(struct clk_init_data){
+                       .name = "ahbix",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_regmap *lcc_ipq806x_clks[] = {
+       [PLL4] = &pll4.clkr,
+       [MI2S_OSR_SRC] = &mi2s_osr_src.clkr,
+       [MI2S_OSR_CLK] = &mi2s_osr_clk.clkr,
+       [MI2S_DIV_CLK] = &mi2s_div_clk.clkr,
+       [MI2S_BIT_DIV_CLK] = &mi2s_bit_div_clk.clkr,
+       [MI2S_BIT_CLK] = &mi2s_bit_clk.clkr,
+       [PCM_SRC] = &pcm_src.clkr,
+       [PCM_CLK_OUT] = &pcm_clk_out.clkr,
+       [PCM_CLK] = &pcm_clk.clkr,
+       [SPDIF_SRC] = &spdif_src.clkr,
+       [SPDIF_CLK] = &spdif_clk.clkr,
+       [AHBIX_CLK] = &ahbix_clk.clkr,
+};
+
+static const struct regmap_config lcc_ipq806x_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0xfc,
+       .fast_io        = true,
+};
+
+static const struct qcom_cc_desc lcc_ipq806x_desc = {
+       .config = &lcc_ipq806x_regmap_config,
+       .clks = lcc_ipq806x_clks,
+       .num_clks = ARRAY_SIZE(lcc_ipq806x_clks),
+};
+
+static const struct of_device_id lcc_ipq806x_match_table[] = {
+       { .compatible = "qcom,lcc-ipq8064" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, lcc_ipq806x_match_table);
+
+static int lcc_ipq806x_probe(struct platform_device *pdev)
+{
+       u32 val;
+       struct regmap *regmap;
+
+       regmap = qcom_cc_map(pdev, &lcc_ipq806x_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       /* Configure the rate of PLL4 if the bootloader hasn't already */
+       regmap_read(regmap, 0x0, &val);
+       if (!val)
+               clk_pll_configure_sr(&pll4, regmap, &pll4_config, true);
+       /* Enable PLL4 source on the LPASS Primary PLL Mux */
+       regmap_write(regmap, 0xc4, 0x1);
+
+       return qcom_cc_really_probe(pdev, &lcc_ipq806x_desc, regmap);
+}
+
+static int lcc_ipq806x_remove(struct platform_device *pdev)
+{
+       qcom_cc_remove(pdev);
+       return 0;
+}
+
+static struct platform_driver lcc_ipq806x_driver = {
+       .probe          = lcc_ipq806x_probe,
+       .remove         = lcc_ipq806x_remove,
+       .driver         = {
+               .name   = "lcc-ipq806x",
+               .owner  = THIS_MODULE,
+               .of_match_table = lcc_ipq806x_match_table,
+       },
+};
+module_platform_driver(lcc_ipq806x_driver);
+
+MODULE_DESCRIPTION("QCOM LCC IPQ806x Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lcc-ipq806x");
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
new file mode 100644 (file)
index 0000000..a75a408
--- /dev/null
@@ -0,0 +1,585 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk-provider.h>
+#include <linux/regmap.h>
+
+#include <dt-bindings/clock/qcom,lcc-msm8960.h>
+
+#include "common.h"
+#include "clk-regmap.h"
+#include "clk-pll.h"
+#include "clk-rcg.h"
+#include "clk-branch.h"
+#include "clk-regmap-divider.h"
+#include "clk-regmap-mux.h"
+
+static struct clk_pll pll4 = {
+       .l_reg = 0x4,
+       .m_reg = 0x8,
+       .n_reg = 0xc,
+       .config_reg = 0x14,
+       .mode_reg = 0x0,
+       .status_reg = 0x18,
+       .status_bit = 16,
+       .clkr.hw.init = &(struct clk_init_data){
+               .name = "pll4",
+               .parent_names = (const char *[]){ "pxo" },
+               .num_parents = 1,
+               .ops = &clk_pll_ops,
+       },
+};
+
+#define P_PXO  0
+#define P_PLL4 1
+
+static const u8 lcc_pxo_pll4_map[] = {
+       [P_PXO]         = 0,
+       [P_PLL4]        = 2,
+};
+
+static const char *lcc_pxo_pll4[] = {
+       "pxo",
+       "pll4_vote",
+};
+
+static struct freq_tbl clk_tbl_aif_osr_492[] = {
+       {   512000, P_PLL4, 4, 1, 240 },
+       {   768000, P_PLL4, 4, 1, 160 },
+       {  1024000, P_PLL4, 4, 1, 120 },
+       {  1536000, P_PLL4, 4, 1,  80 },
+       {  2048000, P_PLL4, 4, 1,  60 },
+       {  3072000, P_PLL4, 4, 1,  40 },
+       {  4096000, P_PLL4, 4, 1,  30 },
+       {  6144000, P_PLL4, 4, 1,  20 },
+       {  8192000, P_PLL4, 4, 1,  15 },
+       { 12288000, P_PLL4, 4, 1,  10 },
+       { 24576000, P_PLL4, 4, 1,   5 },
+       { 27000000, P_PXO,  1, 0,   0 },
+       { }
+};
+
+static struct freq_tbl clk_tbl_aif_osr_393[] = {
+       {   512000, P_PLL4, 4, 1, 192 },
+       {   768000, P_PLL4, 4, 1, 128 },
+       {  1024000, P_PLL4, 4, 1,  96 },
+       {  1536000, P_PLL4, 4, 1,  64 },
+       {  2048000, P_PLL4, 4, 1,  48 },
+       {  3072000, P_PLL4, 4, 1,  32 },
+       {  4096000, P_PLL4, 4, 1,  24 },
+       {  6144000, P_PLL4, 4, 1,  16 },
+       {  8192000, P_PLL4, 4, 1,  12 },
+       { 12288000, P_PLL4, 4, 1,   8 },
+       { 24576000, P_PLL4, 4, 1,   4 },
+       { 27000000, P_PXO,  1, 0,   0 },
+       { }
+};
+
+static struct clk_rcg mi2s_osr_src = {
+       .ns_reg = 0x48,
+       .md_reg = 0x4c,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 24,
+               .m_val_shift = 8,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_aif_osr_393,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_osr_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static const char *lcc_mi2s_parents[] = {
+       "mi2s_osr_src",
+};
+
+static struct clk_branch mi2s_osr_clk = {
+       .halt_reg = 0x50,
+       .halt_bit = 1,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(17),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_osr_clk",
+                       .parent_names = lcc_mi2s_parents,
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_div mi2s_div_clk = {
+       .reg = 0x48,
+       .shift = 10,
+       .width = 4,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(15),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_div_clk",
+                       .parent_names = lcc_mi2s_parents,
+                       .num_parents = 1,
+                       .ops = &clk_regmap_div_ops,
+               },
+       },
+};
+
+static struct clk_branch mi2s_bit_div_clk = {
+       .halt_reg = 0x50,
+       .halt_bit = 0,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x48,
+               .enable_mask = BIT(15),
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_bit_div_clk",
+                       .parent_names = (const char *[]){ "mi2s_div_clk" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_mux mi2s_bit_clk = {
+       .reg = 0x48,
+       .shift = 14,
+       .width = 1,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "mi2s_bit_clk",
+                       .parent_names = (const char *[]){
+                               "mi2s_bit_div_clk",
+                               "mi2s_codec_clk",
+                       },
+                       .num_parents = 2,
+                       .ops = &clk_regmap_mux_closest_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
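+/*
+ * Instantiates the full clock chain for one I2S interface: the OSR RCG
+ * source, its branch gate, the regmap divider, the bit-clock branch gate
+ * and the internal/external bit-clock mux.
+ */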
+#define CLK_AIF_OSR_DIV(prefix, _ns, _md, hr)                  \
+static struct clk_rcg prefix##_osr_src = {                     \
+       .ns_reg = _ns,                                          \
+       .md_reg = _md,                                          \
+       .mn = {                                                 \
+               .mnctr_en_bit = 8,                              \
+               .mnctr_reset_bit = 7,                           \
+               .mnctr_mode_shift = 5,                          \
+               .n_val_shift = 24,                              \
+               .m_val_shift = 8,                               \
+               .width = 8,                                     \
+       },                                                      \
+       .p = {                                                  \
+               .pre_div_shift = 3,                             \
+               .pre_div_width = 2,                             \
+       },                                                      \
+       .s = {                                                  \
+               .src_sel_shift = 0,                             \
+               .parent_map = lcc_pxo_pll4_map,                 \
+       },                                                      \
+       .freq_tbl = clk_tbl_aif_osr_393,                        \
+       .clkr = {                                               \
+               .enable_reg = _ns,                              \
+               .enable_mask = BIT(9),                          \
+               .hw.init = &(struct clk_init_data){             \
+                       .name = #prefix "_osr_src",             \
+                       .parent_names = lcc_pxo_pll4,           \
+                       .num_parents = 2,                       \
+                       .ops = &clk_rcg_ops,                    \
+                       .flags = CLK_SET_RATE_GATE,             \
+               },                                              \
+       },                                                      \
+};                                                             \
+                                                               \
+static const char *lcc_##prefix##_parents[] = {                        \
+       #prefix "_osr_src",                                     \
+};                                                             \
+                                                               \
+static struct clk_branch prefix##_osr_clk = {                  \
+       .halt_reg = hr,                                         \
+       .halt_bit = 1,                                          \
+       .halt_check = BRANCH_HALT_ENABLE,                       \
+       .clkr = {                                               \
+               .enable_reg = _ns,                              \
+               .enable_mask = BIT(21),                         \
+               .hw.init = &(struct clk_init_data){             \
+                       .name = #prefix "_osr_clk",             \
+                       .parent_names = lcc_##prefix##_parents, \
+                       .num_parents = 1,                       \
+                       .ops = &clk_branch_ops,                 \
+                       .flags = CLK_SET_RATE_PARENT,           \
+               },                                              \
+       },                                                      \
+};                                                             \
+                                                               \
+static struct clk_regmap_div prefix##_div_clk = {              \
+       .reg = _ns,                                             \
+       .shift = 10,                                            \
+       .width = 8,                                             \
+       .clkr = {                                               \
+               .hw.init = &(struct clk_init_data){             \
+                       .name = #prefix "_div_clk",             \
+                       .parent_names = lcc_##prefix##_parents, \
+                       .num_parents = 1,                       \
+                       .ops = &clk_regmap_div_ops,             \
+               },                                              \
+       },                                                      \
+};                                                             \
+                                                               \
+static struct clk_branch prefix##_bit_div_clk = {              \
+       .halt_reg = hr,                                         \
+       .halt_bit = 0,                                          \
+       .halt_check = BRANCH_HALT_ENABLE,                       \
+       .clkr = {                                               \
+               .enable_reg = _ns,                              \
+               .enable_mask = BIT(19),                         \
+               .hw.init = &(struct clk_init_data){             \
+                       .name = #prefix "_bit_div_clk",         \
+                       .parent_names = (const char *[]){       \
+                               #prefix "_div_clk"              \
+                       },                                      \
+                       .num_parents = 1,                       \
+                       .ops = &clk_branch_ops,                 \
+                       .flags = CLK_SET_RATE_PARENT,           \
+               },                                              \
+       },                                                      \
+};                                                             \
+                                                               \
+static struct clk_regmap_mux prefix##_bit_clk = {              \
+       .reg = _ns,                                             \
+       .shift = 18,                                            \
+       .width = 1,                                             \
+       .clkr = {                                               \
+               .hw.init = &(struct clk_init_data){             \
+                       .name = #prefix "_bit_clk",             \
+                       .parent_names = (const char *[]){       \
+                               #prefix "_bit_div_clk",         \
+                               #prefix "_codec_clk",           \
+                       },                                      \
+                       .num_parents = 2,                       \
+                       .ops = &clk_regmap_mux_closest_ops,     \
+                       .flags = CLK_SET_RATE_PARENT,           \
+               },                                              \
+       },                                                      \
+}
+
+CLK_AIF_OSR_DIV(codec_i2s_mic, 0x60, 0x64, 0x68);
+CLK_AIF_OSR_DIV(spare_i2s_mic, 0x78, 0x7c, 0x80);
+CLK_AIF_OSR_DIV(codec_i2s_spkr, 0x6c, 0x70, 0x74);
+CLK_AIF_OSR_DIV(spare_i2s_spkr, 0x84, 0x88, 0x8c);
+
+static struct freq_tbl clk_tbl_pcm_492[] = {
+       {   256000, P_PLL4, 4, 1, 480 },
+       {   512000, P_PLL4, 4, 1, 240 },
+       {   768000, P_PLL4, 4, 1, 160 },
+       {  1024000, P_PLL4, 4, 1, 120 },
+       {  1536000, P_PLL4, 4, 1,  80 },
+       {  2048000, P_PLL4, 4, 1,  60 },
+       {  3072000, P_PLL4, 4, 1,  40 },
+       {  4096000, P_PLL4, 4, 1,  30 },
+       {  6144000, P_PLL4, 4, 1,  20 },
+       {  8192000, P_PLL4, 4, 1,  15 },
+       { 12288000, P_PLL4, 4, 1,  10 },
+       { 24576000, P_PLL4, 4, 1,   5 },
+       { 27000000, P_PXO,  1, 0,   0 },
+       { }
+};
+
+static struct freq_tbl clk_tbl_pcm_393[] = {
+       {   256000, P_PLL4, 4, 1, 384 },
+       {   512000, P_PLL4, 4, 1, 192 },
+       {   768000, P_PLL4, 4, 1, 128 },
+       {  1024000, P_PLL4, 4, 1,  96 },
+       {  1536000, P_PLL4, 4, 1,  64 },
+       {  2048000, P_PLL4, 4, 1,  48 },
+       {  3072000, P_PLL4, 4, 1,  32 },
+       {  4096000, P_PLL4, 4, 1,  24 },
+       {  6144000, P_PLL4, 4, 1,  16 },
+       {  8192000, P_PLL4, 4, 1,  12 },
+       { 12288000, P_PLL4, 4, 1,   8 },
+       { 24576000, P_PLL4, 4, 1,   4 },
+       { 27000000, P_PXO,  1, 0,   0 },
+       { }
+};
+
+static struct clk_rcg pcm_src = {
+       .ns_reg = 0x54,
+       .md_reg = 0x58,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 16,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_pcm_393,
+       .clkr = {
+               .enable_reg = 0x54,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static struct clk_branch pcm_clk_out = {
+       .halt_reg = 0x5c,
+       .halt_bit = 0,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0x54,
+               .enable_mask = BIT(11),
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_clk_out",
+                       .parent_names = (const char *[]){ "pcm_src" },
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap_mux pcm_clk = {
+       .reg = 0x54,
+       .shift = 10,
+       .width = 1,
+       .clkr = {
+               .hw.init = &(struct clk_init_data){
+                       .name = "pcm_clk",
+                       .parent_names = (const char *[]){
+                               "pcm_clk_out",
+                               "pcm_codec_clk",
+                       },
+                       .num_parents = 2,
+                       .ops = &clk_regmap_mux_closest_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_rcg slimbus_src = {
+       .ns_reg = 0xcc,
+       .md_reg = 0xd0,
+       .mn = {
+               .mnctr_en_bit = 8,
+               .mnctr_reset_bit = 7,
+               .mnctr_mode_shift = 5,
+               .n_val_shift = 16,
+               .m_val_shift = 16,
+               .width = 8,
+       },
+       .p = {
+               .pre_div_shift = 3,
+               .pre_div_width = 2,
+       },
+       .s = {
+               .src_sel_shift = 0,
+               .parent_map = lcc_pxo_pll4_map,
+       },
+       .freq_tbl = clk_tbl_aif_osr_393,
+       .clkr = {
+               .enable_reg = 0xcc,
+               .enable_mask = BIT(9),
+               .hw.init = &(struct clk_init_data){
+                       .name = "slimbus_src",
+                       .parent_names = lcc_pxo_pll4,
+                       .num_parents = 2,
+                       .ops = &clk_rcg_ops,
+                       .flags = CLK_SET_RATE_GATE,
+               },
+       },
+};
+
+static const char *lcc_slimbus_parents[] = {
+       "slimbus_src",
+};
+
+static struct clk_branch audio_slimbus_clk = {
+       .halt_reg = 0xd4,
+       .halt_bit = 0,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0xcc,
+               .enable_mask = BIT(10),
+               .hw.init = &(struct clk_init_data){
+                       .name = "audio_slimbus_clk",
+                       .parent_names = lcc_slimbus_parents,
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_branch sps_slimbus_clk = {
+       .halt_reg = 0xd4,
+       .halt_bit = 1,
+       .halt_check = BRANCH_HALT_ENABLE,
+       .clkr = {
+               .enable_reg = 0xcc,
+               .enable_mask = BIT(12),
+               .hw.init = &(struct clk_init_data){
+                       .name = "sps_slimbus_clk",
+                       .parent_names = lcc_slimbus_parents,
+                       .num_parents = 1,
+                       .ops = &clk_branch_ops,
+                       .flags = CLK_SET_RATE_PARENT,
+               },
+       },
+};
+
+static struct clk_regmap *lcc_msm8960_clks[] = {
+       [PLL4] = &pll4.clkr,
+       [MI2S_OSR_SRC] = &mi2s_osr_src.clkr,
+       [MI2S_OSR_CLK] = &mi2s_osr_clk.clkr,
+       [MI2S_DIV_CLK] = &mi2s_div_clk.clkr,
+       [MI2S_BIT_DIV_CLK] = &mi2s_bit_div_clk.clkr,
+       [MI2S_BIT_CLK] = &mi2s_bit_clk.clkr,
+       [PCM_SRC] = &pcm_src.clkr,
+       [PCM_CLK_OUT] = &pcm_clk_out.clkr,
+       [PCM_CLK] = &pcm_clk.clkr,
+       [SLIMBUS_SRC] = &slimbus_src.clkr,
+       [AUDIO_SLIMBUS_CLK] = &audio_slimbus_clk.clkr,
+       [SPS_SLIMBUS_CLK] = &sps_slimbus_clk.clkr,
+       [CODEC_I2S_MIC_OSR_SRC] = &codec_i2s_mic_osr_src.clkr,
+       [CODEC_I2S_MIC_OSR_CLK] = &codec_i2s_mic_osr_clk.clkr,
+       [CODEC_I2S_MIC_DIV_CLK] = &codec_i2s_mic_div_clk.clkr,
+       [CODEC_I2S_MIC_BIT_DIV_CLK] = &codec_i2s_mic_bit_div_clk.clkr,
+       [CODEC_I2S_MIC_BIT_CLK] = &codec_i2s_mic_bit_clk.clkr,
+       [SPARE_I2S_MIC_OSR_SRC] = &spare_i2s_mic_osr_src.clkr,
+       [SPARE_I2S_MIC_OSR_CLK] = &spare_i2s_mic_osr_clk.clkr,
+       [SPARE_I2S_MIC_DIV_CLK] = &spare_i2s_mic_div_clk.clkr,
+       [SPARE_I2S_MIC_BIT_DIV_CLK] = &spare_i2s_mic_bit_div_clk.clkr,
+       [SPARE_I2S_MIC_BIT_CLK] = &spare_i2s_mic_bit_clk.clkr,
+       [CODEC_I2S_SPKR_OSR_SRC] = &codec_i2s_spkr_osr_src.clkr,
+       [CODEC_I2S_SPKR_OSR_CLK] = &codec_i2s_spkr_osr_clk.clkr,
+       [CODEC_I2S_SPKR_DIV_CLK] = &codec_i2s_spkr_div_clk.clkr,
+       [CODEC_I2S_SPKR_BIT_DIV_CLK] = &codec_i2s_spkr_bit_div_clk.clkr,
+       [CODEC_I2S_SPKR_BIT_CLK] = &codec_i2s_spkr_bit_clk.clkr,
+       [SPARE_I2S_SPKR_OSR_SRC] = &spare_i2s_spkr_osr_src.clkr,
+       [SPARE_I2S_SPKR_OSR_CLK] = &spare_i2s_spkr_osr_clk.clkr,
+       [SPARE_I2S_SPKR_DIV_CLK] = &spare_i2s_spkr_div_clk.clkr,
+       [SPARE_I2S_SPKR_BIT_DIV_CLK] = &spare_i2s_spkr_bit_div_clk.clkr,
+       [SPARE_I2S_SPKR_BIT_CLK] = &spare_i2s_spkr_bit_clk.clkr,
+};
+
+static const struct regmap_config lcc_msm8960_regmap_config = {
+       .reg_bits       = 32,
+       .reg_stride     = 4,
+       .val_bits       = 32,
+       .max_register   = 0xfc,
+       .fast_io        = true,
+};
+
+static const struct qcom_cc_desc lcc_msm8960_desc = {
+       .config = &lcc_msm8960_regmap_config,
+       .clks = lcc_msm8960_clks,
+       .num_clks = ARRAY_SIZE(lcc_msm8960_clks),
+};
+
+static const struct of_device_id lcc_msm8960_match_table[] = {
+       { .compatible = "qcom,lcc-msm8960" },
+       { .compatible = "qcom,lcc-apq8064" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, lcc_msm8960_match_table);
+
+static int lcc_msm8960_probe(struct platform_device *pdev)
+{
+       u32 val;
+       struct regmap *regmap;
+
+       regmap = qcom_cc_map(pdev, &lcc_msm8960_desc);
+       if (IS_ERR(regmap))
+               return PTR_ERR(regmap);
+
+       /* Use the correct frequency plan depending on speed of PLL4 */
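+       /* 0x4 is PLL4's L register; L = 0x12 indicates the 491.52 MHz plan */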
+       regmap_read(regmap, 0x4, &val);
+       if (val == 0x12) {
+               slimbus_src.freq_tbl = clk_tbl_aif_osr_492;
+               mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+               codec_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+               spare_i2s_mic_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+               codec_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+               spare_i2s_spkr_osr_src.freq_tbl = clk_tbl_aif_osr_492;
+               pcm_src.freq_tbl = clk_tbl_pcm_492;
+       }
+       /* Enable PLL4 source on the LPASS Primary PLL Mux */
+       regmap_write(regmap, 0xc4, 0x1);
+
+       return qcom_cc_really_probe(pdev, &lcc_msm8960_desc, regmap);
+}
+
+static int lcc_msm8960_remove(struct platform_device *pdev)
+{
+       qcom_cc_remove(pdev);
+       return 0;
+}
+
+static struct platform_driver lcc_msm8960_driver = {
+       .probe          = lcc_msm8960_probe,
+       .remove         = lcc_msm8960_remove,
+       .driver         = {
+               .name   = "lcc-msm8960",
+               .owner  = THIS_MODULE,
+               .of_match_table = lcc_msm8960_match_table,
+       },
+};
+module_platform_driver(lcc_msm8960_driver);
+
+MODULE_DESCRIPTION("QCOM LCC MSM8960 Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:lcc-msm8960");
index cbcddcc02475233f53b2d150f8395a99c3b4ba4d..05d7a0bc059907872ff1719d0c401a0494082199 100644 (file)
@@ -535,44 +535,44 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gll_usb_npll_p, 0,
                        RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(1), 8, GFLAGS),
-       COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", 0,
+       COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(17), 0,
                        RK3288_CLKGATE_CON(1), 9, GFLAGS),
-       MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, 0,
+       MUX(SCLK_UART0, "sclk_uart0", mux_uart0_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(13), 8, 2, MFLAGS),
        MUX(0, "uart_src", mux_pll_src_cpll_gpll_p, 0,
                        RK3288_CLKSEL_CON(13), 15, 1, MFLAGS),
        COMPOSITE_NOMUX(0, "uart1_src", "uart_src", 0,
                        RK3288_CLKSEL_CON(14), 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(1), 10, GFLAGS),
-       COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", 0,
+       COMPOSITE_FRAC(0, "uart1_frac", "uart1_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(18), 0,
                        RK3288_CLKGATE_CON(1), 11, GFLAGS),
-       MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, 0,
+       MUX(SCLK_UART1, "sclk_uart1", mux_uart1_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(14), 8, 2, MFLAGS),
        COMPOSITE_NOMUX(0, "uart2_src", "uart_src", 0,
                        RK3288_CLKSEL_CON(15), 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(1), 12, GFLAGS),
-       COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", 0,
+       COMPOSITE_FRAC(0, "uart2_frac", "uart2_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(19), 0,
                        RK3288_CLKGATE_CON(1), 13, GFLAGS),
-       MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, 0,
+       MUX(SCLK_UART2, "sclk_uart2", mux_uart2_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(15), 8, 2, MFLAGS),
        COMPOSITE_NOMUX(0, "uart3_src", "uart_src", 0,
                        RK3288_CLKSEL_CON(16), 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(1), 14, GFLAGS),
-       COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", 0,
+       COMPOSITE_FRAC(0, "uart3_frac", "uart3_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(20), 0,
                        RK3288_CLKGATE_CON(1), 15, GFLAGS),
-       MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, 0,
+       MUX(SCLK_UART3, "sclk_uart3", mux_uart3_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(16), 8, 2, MFLAGS),
        COMPOSITE_NOMUX(0, "uart4_src", "uart_src", 0,
                        RK3288_CLKSEL_CON(3), 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(2), 12, GFLAGS),
-       COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", 0,
+       COMPOSITE_FRAC(0, "uart4_frac", "uart4_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(7), 0,
                        RK3288_CLKGATE_CON(2), 13, GFLAGS),
-       MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, 0,
+       MUX(SCLK_UART4, "sclk_uart4", mux_uart4_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(3), 8, 2, MFLAGS),
 
        COMPOSITE(0, "mac_pll_src", mux_pll_src_npll_cpll_gpll_p, 0,
@@ -598,7 +598,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        GATE(0, "jtag", "ext_jtag", 0,
                        RK3288_CLKGATE_CON(4), 14, GFLAGS),
 
-       COMPOSITE_NODIV(0, "usbphy480m_src", mux_usbphy480m_p, 0,
+       COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
                        RK3288_CLKSEL_CON(13), 11, 2, MFLAGS,
                        RK3288_CLKGATE_CON(5), 14, GFLAGS),
        COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
@@ -704,8 +704,8 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 
        GATE(SCLK_LCDC_PWM0, "sclk_lcdc_pwm0", "xin24m", 0, RK3288_CLKGATE_CON(13), 10, GFLAGS),
        GATE(SCLK_LCDC_PWM1, "sclk_lcdc_pwm1", "xin24m", 0, RK3288_CLKGATE_CON(13), 11, GFLAGS),
-       GATE(0, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS),
-       GATE(0, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS),
+       GATE(SCLK_PVTM_CORE, "sclk_pvtm_core", "xin24m", 0, RK3288_CLKGATE_CON(5), 9, GFLAGS),
+       GATE(SCLK_PVTM_GPU, "sclk_pvtm_gpu", "xin24m", 0, RK3288_CLKGATE_CON(5), 10, GFLAGS),
        GATE(0, "sclk_mipidsi_24m", "xin24m", 0, RK3288_CLKGATE_CON(5), 15, GFLAGS),
 
        /* sclk_gpu gates */
@@ -805,6 +805,20 @@ static int rk3288_clk_suspend(void)
                rk3288_saved_cru_regs[i] =
                                readl_relaxed(rk3288_cru_base + reg_id);
        }
+
+       /*
+        * Switch PLLs other than DPLL (for SDRAM) to slow mode to
+        * avoid crashes on resume. The Mask ROM on the system will
+        * put APLL, CPLL, and GPLL into slow mode at resume time
+        * anyway (which is why we restore them), but we might not
+        * even make it to the Mask ROM if this isn't done at suspend
+        * time.
+        *
+        * NOTE: only APLL truly matters here, but we'll do them all.
+        */
+
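+       /*
+        * The upper half-word of MODE_CON is the hiword write-enable mask,
+        * so 0xf3030000 touches only the non-DPLL mode fields; the zeroed
+        * lower half-word selects slow (24 MHz) mode for those PLLs.
+        */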
+       writel_relaxed(0xf3030000, rk3288_cru_base + RK3288_MODE_CON);
+
        return 0;
 }
 
@@ -866,6 +880,14 @@ static void __init rk3288_clk_init(struct device_node *np)
                pr_warn("%s: could not register clock hclk_vcodec_pre: %ld\n",
                        __func__, PTR_ERR(clk));
 
+       /* Watchdog pclk is controlled by RK3288_SGRF_SOC_CON0[1]. */
+       clk = clk_register_fixed_factor(NULL, "pclk_wdt", "pclk_pd_alive", 0, 1, 1);
+       if (IS_ERR(clk))
+               pr_warn("%s: could not register clock pclk_wdt: %ld\n",
+                       __func__, PTR_ERR(clk));
+       else
+               rockchip_clk_add_lookup(clk, PCLK_WDT);
+
        rockchip_clk_register_plls(rk3288_pll_clks,
                                   ARRAY_SIZE(rk3288_pll_clks),
                                   RK3288_GRF_SOC_STATUS1);
index f2c2ccce49bb1ad00f7502e9d4e4236fc6bcbe54..454b02ae486a86917f31614c2c4b016841deefaa 100644 (file)
@@ -82,6 +82,26 @@ static const struct of_device_id exynos_audss_clk_of_match[] = {
        {},
 };
 
+static void exynos_audss_clk_teardown(void)
+{
+       int i;
+
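+       /* clk_table is laid out muxes first, then dividers, then gates */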
+       for (i = EXYNOS_MOUT_AUDSS; i < EXYNOS_DOUT_SRP; i++) {
+               if (!IS_ERR(clk_table[i]))
+                       clk_unregister_mux(clk_table[i]);
+       }
+
+       for (; i < EXYNOS_SRP_CLK; i++) {
+               if (!IS_ERR(clk_table[i]))
+                       clk_unregister_divider(clk_table[i]);
+       }
+
+       for (; i < clk_data.clk_num; i++) {
+               if (!IS_ERR(clk_table[i]))
+                       clk_unregister_gate(clk_table[i]);
+       }
+}
+
 /* register exynos_audss clocks */
 static int exynos_audss_clk_probe(struct platform_device *pdev)
 {
@@ -219,10 +239,7 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
        return 0;
 
 unregister:
-       for (i = 0; i < clk_data.clk_num; i++) {
-               if (!IS_ERR(clk_table[i]))
-                       clk_unregister(clk_table[i]);
-       }
+       exynos_audss_clk_teardown();
 
        if (!IS_ERR(epll))
                clk_disable_unprepare(epll);
@@ -232,18 +249,13 @@ unregister:
 
 static int exynos_audss_clk_remove(struct platform_device *pdev)
 {
-       int i;
-
 #ifdef CONFIG_PM_SLEEP
        unregister_syscore_ops(&exynos_audss_clk_syscore_ops);
 #endif
 
        of_clk_del_provider(pdev->dev.of_node);
 
-       for (i = 0; i < clk_data.clk_num; i++) {
-               if (!IS_ERR(clk_table[i]))
-                       clk_unregister(clk_table[i]);
-       }
+       exynos_audss_clk_teardown();
 
        if (!IS_ERR(epll))
                clk_disable_unprepare(epll);
index 6e6cca3920829b5a7dc0685a071389a2d201d780..cc4c348d8a24038007375bcf53ba45a96bcd9ac9 100644 (file)
 #define PWR_CTRL1_USE_CORE1_WFI                        (1 << 1)
 #define PWR_CTRL1_USE_CORE0_WFI                        (1 << 0)
 
-/* list of PLLs to be registered */
-enum exynos3250_plls {
-       apll, mpll, vpll, upll,
-       nr_plls
-};
-
-/* list of PLLs in DMC block to be registered */
-enum exynos3250_dmc_plls {
-       bpll, epll,
-       nr_dmc_plls
-};
-
-static void __iomem *reg_base;
-static void __iomem *dmc_reg_base;
-
-/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos3250_clk_regs;
-
 static unsigned long exynos3250_cmu_clk_regs[] __initdata = {
        SRC_LEFTBUS,
        DIV_LEFTBUS,
@@ -195,43 +174,6 @@ static unsigned long exynos3250_cmu_clk_regs[] __initdata = {
        PWR_CTRL2,
 };
 
-static int exynos3250_clk_suspend(void)
-{
-       samsung_clk_save(reg_base, exynos3250_clk_regs,
-                               ARRAY_SIZE(exynos3250_cmu_clk_regs));
-       return 0;
-}
-
-static void exynos3250_clk_resume(void)
-{
-       samsung_clk_restore(reg_base, exynos3250_clk_regs,
-                               ARRAY_SIZE(exynos3250_cmu_clk_regs));
-}
-
-static struct syscore_ops exynos3250_clk_syscore_ops = {
-       .suspend = exynos3250_clk_suspend,
-       .resume = exynos3250_clk_resume,
-};
-
-static void exynos3250_clk_sleep_init(void)
-{
-       exynos3250_clk_regs =
-               samsung_clk_alloc_reg_dump(exynos3250_cmu_clk_regs,
-                                          ARRAY_SIZE(exynos3250_cmu_clk_regs));
-       if (!exynos3250_clk_regs) {
-               pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-               goto err;
-       }
-
-       register_syscore_ops(&exynos3250_clk_syscore_ops);
-       return;
-err:
-       kfree(exynos3250_clk_regs);
-}
-#else
-static inline void exynos3250_clk_sleep_init(void) { }
-#endif
-
 /* list of all parent clock list */
 PNAME(mout_vpllsrc_p)          = { "fin_pll", };
 
@@ -782,18 +724,18 @@ static struct samsung_pll_rate_table exynos3250_vpll_rates[] = {
        { /* sentinel */ }
 };
 
-static struct samsung_pll_clock exynos3250_plls[nr_plls] __initdata = {
-       [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
-                       APLL_LOCK, APLL_CON0, NULL),
-       [mpll] = PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
-                       MPLL_LOCK, MPLL_CON0, NULL),
-       [vpll] = PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll",
-                       VPLL_LOCK, VPLL_CON0, NULL),
-       [upll] = PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll",
-                       UPLL_LOCK, UPLL_CON0, NULL),
+static struct samsung_pll_clock exynos3250_plls[] __initdata = {
+       PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+               APLL_LOCK, APLL_CON0, exynos3250_pll_rates),
+       PLL(pll_35xx, CLK_FOUT_MPLL, "fout_mpll", "fin_pll",
+                       MPLL_LOCK, MPLL_CON0, exynos3250_pll_rates),
+       PLL(pll_36xx, CLK_FOUT_VPLL, "fout_vpll", "fin_pll",
+                       VPLL_LOCK, VPLL_CON0, exynos3250_vpll_rates),
+       PLL(pll_35xx, CLK_FOUT_UPLL, "fout_upll", "fin_pll",
+                       UPLL_LOCK, UPLL_CON0, exynos3250_pll_rates),
 };
 
-static void __init exynos3_core_down_clock(void)
+static void __init exynos3_core_down_clock(void __iomem *reg_base)
 {
        unsigned int tmp;
 
@@ -814,38 +756,31 @@ static void __init exynos3_core_down_clock(void)
        __raw_writel(0x0, reg_base + PWR_CTRL2);
 }
 
+static struct samsung_cmu_info cmu_info __initdata = {
+       .pll_clks               = exynos3250_plls,
+       .nr_pll_clks            = ARRAY_SIZE(exynos3250_plls),
+       .mux_clks               = mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(mux_clks),
+       .div_clks               = div_clks,
+       .nr_div_clks            = ARRAY_SIZE(div_clks),
+       .gate_clks              = gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(gate_clks),
+       .fixed_factor_clks      = fixed_factor_clks,
+       .nr_fixed_factor_clks   = ARRAY_SIZE(fixed_factor_clks),
+       .nr_clk_ids             = CLK_NR_CLKS,
+       .clk_regs               = exynos3250_cmu_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(exynos3250_cmu_clk_regs),
+};
+
 static void __init exynos3250_cmu_init(struct device_node *np)
 {
        struct samsung_clk_provider *ctx;
 
-       reg_base = of_iomap(np, 0);
-       if (!reg_base)
-               panic("%s: failed to map registers\n", __func__);
-
-       ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+       ctx = samsung_cmu_register_one(np, &cmu_info);
        if (!ctx)
-               panic("%s: unable to allocate context.\n", __func__);
-
-       samsung_clk_register_fixed_factor(ctx, fixed_factor_clks,
-                                         ARRAY_SIZE(fixed_factor_clks));
-
-       exynos3250_plls[apll].rate_table = exynos3250_pll_rates;
-       exynos3250_plls[mpll].rate_table = exynos3250_pll_rates;
-       exynos3250_plls[vpll].rate_table = exynos3250_vpll_rates;
-       exynos3250_plls[upll].rate_table = exynos3250_pll_rates;
-
-       samsung_clk_register_pll(ctx, exynos3250_plls,
-                                       ARRAY_SIZE(exynos3250_plls), reg_base);
-
-       samsung_clk_register_mux(ctx, mux_clks, ARRAY_SIZE(mux_clks));
-       samsung_clk_register_div(ctx, div_clks, ARRAY_SIZE(div_clks));
-       samsung_clk_register_gate(ctx, gate_clks, ARRAY_SIZE(gate_clks));
-
-       exynos3_core_down_clock();
+               return;
 
-       exynos3250_clk_sleep_init();
-
-       samsung_clk_of_add_provider(np, ctx);
+       exynos3_core_down_clock(ctx->reg_base);
 }
 CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
 
@@ -872,12 +807,6 @@ CLK_OF_DECLARE(exynos3250_cmu, "samsung,exynos3250-cmu", exynos3250_cmu_init);
 #define EPLL_CON2              0x111c
 #define SRC_EPLL               0x1120
 
-/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos3250_dmc_clk_regs;
-
 static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = {
        BPLL_LOCK,
        BPLL_CON0,
@@ -899,43 +828,6 @@ static unsigned long exynos3250_cmu_dmc_clk_regs[] __initdata = {
        SRC_EPLL,
 };
 
-static int exynos3250_dmc_clk_suspend(void)
-{
-       samsung_clk_save(dmc_reg_base, exynos3250_dmc_clk_regs,
-                               ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
-       return 0;
-}
-
-static void exynos3250_dmc_clk_resume(void)
-{
-       samsung_clk_restore(dmc_reg_base, exynos3250_dmc_clk_regs,
-                               ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
-}
-
-static struct syscore_ops exynos3250_dmc_clk_syscore_ops = {
-       .suspend = exynos3250_dmc_clk_suspend,
-       .resume = exynos3250_dmc_clk_resume,
-};
-
-static void exynos3250_dmc_clk_sleep_init(void)
-{
-       exynos3250_dmc_clk_regs =
-               samsung_clk_alloc_reg_dump(exynos3250_cmu_dmc_clk_regs,
-                                  ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs));
-       if (!exynos3250_dmc_clk_regs) {
-               pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-               goto err;
-       }
-
-       register_syscore_ops(&exynos3250_dmc_clk_syscore_ops);
-       return;
-err:
-       kfree(exynos3250_dmc_clk_regs);
-}
-#else
-static inline void exynos3250_dmc_clk_sleep_init(void) { }
-#endif
-
 PNAME(mout_epll_p)     = { "fin_pll", "fout_epll", };
 PNAME(mout_bpll_p)     = { "fin_pll", "fout_bpll", };
 PNAME(mout_mpll_mif_p) = { "fin_pll", "sclk_mpll_mif", };
@@ -977,43 +869,28 @@ static struct samsung_div_clock dmc_div_clks[] __initdata = {
        DIV(CLK_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3),
 };
 
-static struct samsung_pll_clock exynos3250_dmc_plls[nr_dmc_plls] __initdata = {
-       [bpll] = PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll",
-                       BPLL_LOCK, BPLL_CON0, NULL),
-       [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
-                       EPLL_LOCK, EPLL_CON0, NULL),
+static struct samsung_pll_clock exynos3250_dmc_plls[] __initdata = {
+       PLL(pll_35xx, CLK_FOUT_BPLL, "fout_bpll", "fin_pll",
+               BPLL_LOCK, BPLL_CON0, exynos3250_pll_rates),
+       PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+               EPLL_LOCK, EPLL_CON0, exynos3250_epll_rates),
+};
+
+static struct samsung_cmu_info dmc_cmu_info __initdata = {
+       .pll_clks               = exynos3250_dmc_plls,
+       .nr_pll_clks            = ARRAY_SIZE(exynos3250_dmc_plls),
+       .mux_clks               = dmc_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(dmc_mux_clks),
+       .div_clks               = dmc_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(dmc_div_clks),
+       .nr_clk_ids             = NR_CLKS_DMC,
+       .clk_regs               = exynos3250_cmu_dmc_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(exynos3250_cmu_dmc_clk_regs),
 };
 
 static void __init exynos3250_cmu_dmc_init(struct device_node *np)
 {
-       struct samsung_clk_provider *ctx;
-
-       dmc_reg_base = of_iomap(np, 0);
-       if (!dmc_reg_base)
-               panic("%s: failed to map registers\n", __func__);
-
-       ctx = samsung_clk_init(np, dmc_reg_base, NR_CLKS_DMC);
-       if (!ctx)
-               panic("%s: unable to allocate context.\n", __func__);
-
-       exynos3250_dmc_plls[bpll].rate_table = exynos3250_pll_rates;
-       exynos3250_dmc_plls[epll].rate_table = exynos3250_epll_rates;
-
-       pr_err("CLK registering epll bpll: %d, %d, %d, %d\n",
-                       exynos3250_dmc_plls[bpll].rate_table[0].rate,
-                       exynos3250_dmc_plls[bpll].rate_table[0].mdiv,
-                       exynos3250_dmc_plls[bpll].rate_table[0].pdiv,
-                       exynos3250_dmc_plls[bpll].rate_table[0].sdiv
-             );
-       samsung_clk_register_pll(ctx, exynos3250_dmc_plls,
-                               ARRAY_SIZE(exynos3250_dmc_plls), dmc_reg_base);
-
-       samsung_clk_register_mux(ctx, dmc_mux_clks, ARRAY_SIZE(dmc_mux_clks));
-       samsung_clk_register_div(ctx, dmc_div_clks, ARRAY_SIZE(dmc_div_clks));
-
-       exynos3250_dmc_clk_sleep_init();
-
-       samsung_clk_of_add_provider(np, ctx);
+       samsung_cmu_register_one(np, &dmc_cmu_info);
 }
 CLK_OF_DECLARE(exynos3250_cmu_dmc, "samsung,exynos3250-cmu-dmc",
                exynos3250_cmu_dmc_init);
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 88e8c6bbd77ff8ea6919bc6c1ef7bd3fdc1324ae..51462e85675f7f6ed04dc6d3f38891c400aea7e4 100644
@@ -703,12 +703,12 @@ static struct samsung_mux_clock exynos4x12_mux_clks[] __initdata = {
 
 /* list of divider clocks supported in all exynos4 soc's */
 static struct samsung_div_clock exynos4_div_clks[] __initdata = {
-       DIV(0, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3),
+       DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 3),
        DIV(0, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3),
        DIV(0, "div_clkout_leftbus", "mout_clkout_leftbus",
                        CLKOUT_CMU_LEFTBUS, 8, 6),
 
-       DIV(0, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3),
+       DIV(CLK_DIV_GDR, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 3),
        DIV(0, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3),
        DIV(0, "div_clkout_rightbus", "mout_clkout_rightbus",
                        CLKOUT_CMU_RIGHTBUS, 8, 6),
@@ -781,10 +781,10 @@ static struct samsung_div_clock exynos4_div_clks[] __initdata = {
                        CLK_SET_RATE_PARENT, 0),
        DIV(0, "div_clkout_top", "mout_clkout_top", CLKOUT_CMU_TOP, 8, 6),
 
-       DIV(0, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3),
+       DIV(CLK_DIV_ACP, "div_acp", "mout_dmc_bus", DIV_DMC0, 0, 3),
        DIV(0, "div_acp_pclk", "div_acp", DIV_DMC0, 4, 3),
        DIV(0, "div_dphy", "mout_dphy", DIV_DMC0, 8, 3),
-       DIV(0, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3),
+       DIV(CLK_DIV_DMC, "div_dmc", "mout_dmc_bus", DIV_DMC0, 12, 3),
        DIV(0, "div_dmcd", "div_dmc", DIV_DMC0, 16, 3),
        DIV(0, "div_dmcp", "div_dmcd", DIV_DMC0, 20, 3),
        DIV(0, "div_pwi", "mout_pwi", DIV_DMC1, 8, 4),
@@ -829,7 +829,7 @@ static struct samsung_div_clock exynos4x12_div_clks[] __initdata = {
        DIV_F(CLK_DIV_MCUISP1, "div_mcuisp1", "div_mcuisp0", E4X12_DIV_ISP1,
                                                8, 3, CLK_GET_RATE_NOCACHE, 0),
        DIV(CLK_SCLK_FIMG2D, "sclk_fimg2d", "mout_g2d", DIV_DMC1, 0, 4),
-       DIV(0, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
+       DIV(CLK_DIV_C2C, "div_c2c", "mout_c2c", DIV_DMC1, 4, 3),
        DIV(0, "div_c2c_aclk", "div_c2c", DIV_DMC1, 12, 3),
 };
 
diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c
index 2123fc251e0f6960b7566759088914162b840c89..6c78b09c829f9148453f56d4f05981452f91487c 100644
 #define DIV_CPU0               0x14500
 #define DIV_CPU1               0x14504
 
-enum exynos4415_plls {
-       apll, epll, g3d_pll, isp_pll, disp_pll,
-       nr_plls,
-};
-
-static struct samsung_clk_provider *exynos4415_ctx;
-
-/*
- * Support for CMU save/restore across system suspends
- */
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos4415_clk_regs;
-
 static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
        SRC_LEFTBUS,
        DIV_LEFTBUS,
@@ -219,41 +206,6 @@ static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
        DIV_CPU1,
 };
 
-static int exynos4415_clk_suspend(void)
-{
-       samsung_clk_save(exynos4415_ctx->reg_base, exynos4415_clk_regs,
-                               ARRAY_SIZE(exynos4415_cmu_clk_regs));
-
-       return 0;
-}
-
-static void exynos4415_clk_resume(void)
-{
-       samsung_clk_restore(exynos4415_ctx->reg_base, exynos4415_clk_regs,
-                               ARRAY_SIZE(exynos4415_cmu_clk_regs));
-}
-
-static struct syscore_ops exynos4415_clk_syscore_ops = {
-       .suspend = exynos4415_clk_suspend,
-       .resume = exynos4415_clk_resume,
-};
-
-static void exynos4415_clk_sleep_init(void)
-{
-       exynos4415_clk_regs =
-               samsung_clk_alloc_reg_dump(exynos4415_cmu_clk_regs,
-                                       ARRAY_SIZE(exynos4415_cmu_clk_regs));
-       if (!exynos4415_clk_regs) {
-               pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-               return;
-       }
-
-       register_syscore_ops(&exynos4415_clk_syscore_ops);
-}
-#else
-static inline void exynos4415_clk_sleep_init(void) { }
-#endif
-
 /* list of all parent clock list */
 PNAME(mout_g3d_pllsrc_p)       = { "fin_pll", };
 
@@ -959,56 +911,40 @@ static struct samsung_pll_rate_table exynos4415_epll_rates[] = {
        { /* sentinel */ }
 };
 
-static struct samsung_pll_clock exynos4415_plls[nr_plls] __initdata = {
-       [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
-                       APLL_LOCK, APLL_CON0, NULL),
-       [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
-                       EPLL_LOCK, EPLL_CON0, NULL),
-       [g3d_pll] = PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll",
-                       "mout_g3d_pllsrc", G3D_PLL_LOCK, G3D_PLL_CON0, NULL),
-       [isp_pll] = PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll",
-                       ISP_PLL_LOCK, ISP_PLL_CON0, NULL),
-       [disp_pll] = PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll",
-                       "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, NULL),
+static struct samsung_pll_clock exynos4415_plls[] __initdata = {
+       PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+               APLL_LOCK, APLL_CON0, exynos4415_pll_rates),
+       PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+               EPLL_LOCK, EPLL_CON0, exynos4415_epll_rates),
+       PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll", "mout_g3d_pllsrc",
+               G3D_PLL_LOCK, G3D_PLL_CON0, exynos4415_pll_rates),
+       PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll",
+               ISP_PLL_LOCK, ISP_PLL_CON0, exynos4415_pll_rates),
+       PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll",
+               "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, exynos4415_pll_rates),
+};
+
+static struct samsung_cmu_info cmu_info __initdata = {
+       .pll_clks               = exynos4415_plls,
+       .nr_pll_clks            = ARRAY_SIZE(exynos4415_plls),
+       .mux_clks               = exynos4415_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(exynos4415_mux_clks),
+       .div_clks               = exynos4415_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(exynos4415_div_clks),
+       .gate_clks              = exynos4415_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(exynos4415_gate_clks),
+       .fixed_clks             = exynos4415_fixed_rate_clks,
+       .nr_fixed_clks          = ARRAY_SIZE(exynos4415_fixed_rate_clks),
+       .fixed_factor_clks      = exynos4415_fixed_factor_clks,
+       .nr_fixed_factor_clks   = ARRAY_SIZE(exynos4415_fixed_factor_clks),
+       .nr_clk_ids             = CLK_NR_CLKS,
+       .clk_regs               = exynos4415_cmu_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(exynos4415_cmu_clk_regs),
 };
 
 static void __init exynos4415_cmu_init(struct device_node *np)
 {
-       void __iomem *reg_base;
-
-       reg_base = of_iomap(np, 0);
-       if (!reg_base)
-               panic("%s: failed to map registers\n", __func__);
-
-       exynos4415_ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
-       if (!exynos4415_ctx)
-               panic("%s: unable to allocate context.\n", __func__);
-
-       exynos4415_plls[apll].rate_table = exynos4415_pll_rates;
-       exynos4415_plls[epll].rate_table = exynos4415_epll_rates;
-       exynos4415_plls[g3d_pll].rate_table = exynos4415_pll_rates;
-       exynos4415_plls[isp_pll].rate_table = exynos4415_pll_rates;
-       exynos4415_plls[disp_pll].rate_table = exynos4415_pll_rates;
-
-       samsung_clk_register_fixed_factor(exynos4415_ctx,
-                               exynos4415_fixed_factor_clks,
-                               ARRAY_SIZE(exynos4415_fixed_factor_clks));
-       samsung_clk_register_fixed_rate(exynos4415_ctx,
-                               exynos4415_fixed_rate_clks,
-                               ARRAY_SIZE(exynos4415_fixed_rate_clks));
-
-       samsung_clk_register_pll(exynos4415_ctx, exynos4415_plls,
-                               ARRAY_SIZE(exynos4415_plls), reg_base);
-       samsung_clk_register_mux(exynos4415_ctx, exynos4415_mux_clks,
-                               ARRAY_SIZE(exynos4415_mux_clks));
-       samsung_clk_register_div(exynos4415_ctx, exynos4415_div_clks,
-                               ARRAY_SIZE(exynos4415_div_clks));
-       samsung_clk_register_gate(exynos4415_ctx, exynos4415_gate_clks,
-                               ARRAY_SIZE(exynos4415_gate_clks));
-
-       exynos4415_clk_sleep_init();
-
-       samsung_clk_of_add_provider(np, exynos4415_ctx);
+       samsung_cmu_register_one(np, &cmu_info);
 }
 CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
 
@@ -1027,16 +963,6 @@ CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
 #define SRC_DMC                        0x300
 #define DIV_DMC1               0x504
 
-enum exynos4415_dmc_plls {
-       mpll, bpll,
-       nr_dmc_plls,
-};
-
-static struct samsung_clk_provider *exynos4415_dmc_ctx;
-
-#ifdef CONFIG_PM_SLEEP
-static struct samsung_clk_reg_dump *exynos4415_dmc_clk_regs;
-
 static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
        MPLL_LOCK,
        MPLL_CON0,
@@ -1050,42 +976,6 @@ static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
        DIV_DMC1,
 };
 
-static int exynos4415_dmc_clk_suspend(void)
-{
-       samsung_clk_save(exynos4415_dmc_ctx->reg_base,
-                               exynos4415_dmc_clk_regs,
-                               ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
-       return 0;
-}
-
-static void exynos4415_dmc_clk_resume(void)
-{
-       samsung_clk_restore(exynos4415_dmc_ctx->reg_base,
-                               exynos4415_dmc_clk_regs,
-                               ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
-}
-
-static struct syscore_ops exynos4415_dmc_clk_syscore_ops = {
-       .suspend = exynos4415_dmc_clk_suspend,
-       .resume = exynos4415_dmc_clk_resume,
-};
-
-static void exynos4415_dmc_clk_sleep_init(void)
-{
-       exynos4415_dmc_clk_regs =
-               samsung_clk_alloc_reg_dump(exynos4415_cmu_dmc_clk_regs,
-                               ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
-       if (!exynos4415_dmc_clk_regs) {
-               pr_warn("%s: Failed to allocate sleep save data\n", __func__);
-               return;
-       }
-
-       register_syscore_ops(&exynos4415_dmc_clk_syscore_ops);
-}
-#else
-static inline void exynos4415_dmc_clk_sleep_init(void) { }
-#endif /* CONFIG_PM_SLEEP */
-
 PNAME(mout_mpll_p)             = { "fin_pll", "fout_mpll", };
 PNAME(mout_bpll_p)             = { "fin_pll", "fout_bpll", };
 PNAME(mbpll_p)                 = { "mout_mpll", "mout_bpll", };
@@ -1107,38 +997,28 @@ static struct samsung_div_clock exynos4415_dmc_div_clks[] __initdata = {
        DIV(CLK_DMC_DIV_MPLL_PRE, "div_mpll_pre", "mout_mpll", DIV_DMC1, 8, 2),
 };
 
-static struct samsung_pll_clock exynos4415_dmc_plls[nr_dmc_plls] __initdata = {
-       [mpll] = PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
-               MPLL_LOCK, MPLL_CON0, NULL),
-       [bpll] = PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
-               BPLL_LOCK, BPLL_CON0, NULL),
+static struct samsung_pll_clock exynos4415_dmc_plls[] __initdata = {
+       PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
+               MPLL_LOCK, MPLL_CON0, exynos4415_pll_rates),
+       PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
+               BPLL_LOCK, BPLL_CON0, exynos4415_pll_rates),
+};
+
+static struct samsung_cmu_info cmu_dmc_info __initdata = {
+       .pll_clks               = exynos4415_dmc_plls,
+       .nr_pll_clks            = ARRAY_SIZE(exynos4415_dmc_plls),
+       .mux_clks               = exynos4415_dmc_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(exynos4415_dmc_mux_clks),
+       .div_clks               = exynos4415_dmc_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(exynos4415_dmc_div_clks),
+       .nr_clk_ids             = NR_CLKS_DMC,
+       .clk_regs               = exynos4415_cmu_dmc_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs),
 };
 
 static void __init exynos4415_cmu_dmc_init(struct device_node *np)
 {
-       void __iomem *reg_base;
-
-       reg_base = of_iomap(np, 0);
-       if (!reg_base)
-               panic("%s: failed to map registers\n", __func__);
-
-       exynos4415_dmc_ctx = samsung_clk_init(np, reg_base, NR_CLKS_DMC);
-       if (!exynos4415_dmc_ctx)
-               panic("%s: unable to allocate context.\n", __func__);
-
-       exynos4415_dmc_plls[mpll].rate_table = exynos4415_pll_rates;
-       exynos4415_dmc_plls[bpll].rate_table = exynos4415_pll_rates;
-
-       samsung_clk_register_pll(exynos4415_dmc_ctx, exynos4415_dmc_plls,
-                               ARRAY_SIZE(exynos4415_dmc_plls), reg_base);
-       samsung_clk_register_mux(exynos4415_dmc_ctx, exynos4415_dmc_mux_clks,
-                               ARRAY_SIZE(exynos4415_dmc_mux_clks));
-       samsung_clk_register_div(exynos4415_dmc_ctx, exynos4415_dmc_div_clks,
-                               ARRAY_SIZE(exynos4415_dmc_div_clks));
-
-       exynos4415_dmc_clk_sleep_init();
-
-       samsung_clk_of_add_provider(np, exynos4415_dmc_ctx);
+       samsung_cmu_register_one(np, &cmu_dmc_info);
 }
 CLK_OF_DECLARE(exynos4415_cmu_dmc, "samsung,exynos4415-cmu-dmc",
                exynos4415_cmu_dmc_init);
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
index ea4483b8d62e8a89ea2ef1c1d37b1e720c5d9f68..03d36e847b78e067d16bdfe1758df10560c4a2b4 100644
@@ -34,6 +34,7 @@
 #define DIV_TOPC0              0x0600
 #define DIV_TOPC1              0x0604
 #define DIV_TOPC3              0x060C
+#define ENABLE_ACLK_TOPC1      0x0804
 
 static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
        FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_bus0_pll_ctrl", 1, 2, 0),
@@ -45,6 +46,7 @@ static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
 };
 
 /* List of parent clocks for Muxes in CMU_TOPC */
+PNAME(mout_aud_pll_ctrl_p)     = { "fin_pll", "fout_aud_pll" };
 PNAME(mout_bus0_pll_ctrl_p)    = { "fin_pll", "fout_bus0_pll" };
 PNAME(mout_bus1_pll_ctrl_p)    = { "fin_pll", "fout_bus1_pll" };
 PNAME(mout_cc_pll_ctrl_p)      = { "fin_pll", "fout_cc_pll" };
@@ -104,9 +106,11 @@ static struct samsung_mux_clock topc_mux_clks[] __initdata = {
 
        MUX(0, "mout_sclk_bus0_pll_out", mout_sclk_bus0_pll_out_p,
                MUX_SEL_TOPC1, 16, 1),
+       MUX(0, "mout_aud_pll_ctrl", mout_aud_pll_ctrl_p, MUX_SEL_TOPC1, 0, 1),
 
        MUX(0, "mout_aclk_ccore_133", mout_topc_group2, MUX_SEL_TOPC2, 4, 2),
 
+       MUX(0, "mout_aclk_mscl_532", mout_topc_group2, MUX_SEL_TOPC3, 20, 2),
        MUX(0, "mout_aclk_peris_66", mout_topc_group2, MUX_SEL_TOPC3, 24, 2),
 };
 
@@ -114,6 +118,8 @@ static struct samsung_div_clock topc_div_clks[] __initdata = {
        DIV(DOUT_ACLK_CCORE_133, "dout_aclk_ccore_133", "mout_aclk_ccore_133",
                DIV_TOPC0, 4, 4),
 
+       DIV(DOUT_ACLK_MSCL_532, "dout_aclk_mscl_532", "mout_aclk_mscl_532",
+               DIV_TOPC1, 20, 4),
        DIV(DOUT_ACLK_PERIS, "dout_aclk_peris_66", "mout_aclk_peris_66",
                DIV_TOPC1, 24, 4),
 
@@ -125,6 +131,18 @@ static struct samsung_div_clock topc_div_clks[] __initdata = {
                DIV_TOPC3, 12, 3),
        DIV(DOUT_SCLK_MFC_PLL, "dout_sclk_mfc_pll", "mout_mfc_pll_ctrl",
                DIV_TOPC3, 16, 3),
+       DIV(DOUT_SCLK_AUD_PLL, "dout_sclk_aud_pll", "mout_aud_pll_ctrl",
+               DIV_TOPC3, 28, 3),
+};
+
+static struct samsung_pll_rate_table pll1460x_24mhz_tbl[] __initdata = {
+       PLL_36XX_RATE(491520000, 20, 1, 0, 31457),
+       {},
+};
+
+static struct samsung_gate_clock topc_gate_clks[] __initdata = {
+       GATE(ACLK_MSCL_532, "aclk_mscl_532", "dout_aclk_mscl_532",
+               ENABLE_ACLK_TOPC1, 20, 0, 0),
 };
 
 static struct samsung_pll_clock topc_pll_clks[] __initdata = {
@@ -136,8 +154,8 @@ static struct samsung_pll_clock topc_pll_clks[] __initdata = {
                BUS1_DPLL_CON0, NULL),
        PLL(pll_1452x, 0, "fout_mfc_pll", "fin_pll", MFC_PLL_LOCK,
                MFC_PLL_CON0, NULL),
-       PLL(pll_1460x, 0, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK,
-               AUD_PLL_CON0, NULL),
+       PLL(pll_1460x, FOUT_AUD_PLL, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK,
+               AUD_PLL_CON0, pll1460x_24mhz_tbl),
 };
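
The single rate added to pll1460x_24mhz_tbl can be sanity-checked assuming the fractional relation that the PLL_36XX_RATE() macro encodes, rate = fin * (mdiv + kdiv / 65536) / (pdiv << sdiv); the relation is an assumption taken from the generic pll_36xx handling rather than stated in this hunk. With the 24 MHz fin implied by the table name and the entry's mdiv = 20, pdiv = 1, sdiv = 0, kdiv = 31457, this gives

	24000000 * (20 + 31457/65536) / (1 << 0) = 491519897 Hz  (~491.52 MHz)

within about 100 Hz of the 491520000 requested, so the AUD PLL registered just below can actually deliver the rate that the new audio mux, divider and gate entries fan out.
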
 
 static struct samsung_cmu_info topc_cmu_info __initdata = {
@@ -147,6 +165,8 @@ static struct samsung_cmu_info topc_cmu_info __initdata = {
        .nr_mux_clks            = ARRAY_SIZE(topc_mux_clks),
        .div_clks               = topc_div_clks,
        .nr_div_clks            = ARRAY_SIZE(topc_div_clks),
+       .gate_clks              = topc_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(topc_gate_clks),
        .fixed_factor_clks      = topc_fixed_factor_clks,
        .nr_fixed_factor_clks   = ARRAY_SIZE(topc_fixed_factor_clks),
        .nr_clk_ids             = TOPC_NR_CLK,
@@ -166,9 +186,18 @@ CLK_OF_DECLARE(exynos7_clk_topc, "samsung,exynos7-clock-topc",
 #define MUX_SEL_TOP00                  0x0200
 #define MUX_SEL_TOP01                  0x0204
 #define MUX_SEL_TOP03                  0x020C
+#define MUX_SEL_TOP0_PERIC0            0x0230
+#define MUX_SEL_TOP0_PERIC1            0x0234
+#define MUX_SEL_TOP0_PERIC2            0x0238
 #define MUX_SEL_TOP0_PERIC3            0x023C
 #define DIV_TOP03                      0x060C
+#define DIV_TOP0_PERIC0                        0x0630
+#define DIV_TOP0_PERIC1                        0x0634
+#define DIV_TOP0_PERIC2                        0x0638
 #define DIV_TOP0_PERIC3                        0x063C
+#define ENABLE_SCLK_TOP0_PERIC0                0x0A30
+#define ENABLE_SCLK_TOP0_PERIC1                0x0A34
+#define ENABLE_SCLK_TOP0_PERIC2                0x0A38
 #define ENABLE_SCLK_TOP0_PERIC3                0x0A3C
 
 /* List of parent clocks for Muxes in CMU_TOP0 */
@@ -176,6 +205,7 @@ PNAME(mout_bus0_pll_p)      = { "fin_pll", "dout_sclk_bus0_pll" };
 PNAME(mout_bus1_pll_p) = { "fin_pll", "dout_sclk_bus1_pll" };
 PNAME(mout_cc_pll_p)   = { "fin_pll", "dout_sclk_cc_pll" };
 PNAME(mout_mfc_pll_p)  = { "fin_pll", "dout_sclk_mfc_pll" };
+PNAME(mout_aud_pll_p)  = { "fin_pll", "dout_sclk_aud_pll" };
 
 PNAME(mout_top0_half_bus0_pll_p) = {"mout_top0_bus0_pll",
        "ffac_top0_bus0_pll_div2"};
@@ -189,18 +219,34 @@ PNAME(mout_top0_half_mfc_pll_p) = {"mout_top0_mfc_pll",
 PNAME(mout_top0_group1) = {"mout_top0_half_bus0_pll",
        "mout_top0_half_bus1_pll", "mout_top0_half_cc_pll",
        "mout_top0_half_mfc_pll"};
+PNAME(mout_top0_group3) = {"ioclk_audiocdclk0",
+       "ioclk_audiocdclk1", "ioclk_spdif_extclk",
+       "mout_top0_aud_pll", "mout_top0_half_bus0_pll",
+       "mout_top0_half_bus1_pll"};
+PNAME(mout_top0_group4) = {"ioclk_audiocdclk1", "mout_top0_aud_pll",
+       "mout_top0_half_bus0_pll", "mout_top0_half_bus1_pll"};
 
 static unsigned long top0_clk_regs[] __initdata = {
        MUX_SEL_TOP00,
        MUX_SEL_TOP01,
        MUX_SEL_TOP03,
+       MUX_SEL_TOP0_PERIC0,
+       MUX_SEL_TOP0_PERIC1,
+       MUX_SEL_TOP0_PERIC2,
        MUX_SEL_TOP0_PERIC3,
        DIV_TOP03,
+       DIV_TOP0_PERIC0,
+       DIV_TOP0_PERIC1,
+       DIV_TOP0_PERIC2,
        DIV_TOP0_PERIC3,
+       ENABLE_SCLK_TOP0_PERIC0,
+       ENABLE_SCLK_TOP0_PERIC1,
+       ENABLE_SCLK_TOP0_PERIC2,
        ENABLE_SCLK_TOP0_PERIC3,
 };
 
 static struct samsung_mux_clock top0_mux_clks[] __initdata = {
+       MUX(0, "mout_top0_aud_pll", mout_aud_pll_p, MUX_SEL_TOP00, 0, 1),
        MUX(0, "mout_top0_mfc_pll", mout_mfc_pll_p, MUX_SEL_TOP00, 4, 1),
        MUX(0, "mout_top0_cc_pll", mout_cc_pll_p, MUX_SEL_TOP00, 8, 1),
        MUX(0, "mout_top0_bus1_pll", mout_bus1_pll_p, MUX_SEL_TOP00, 12, 1),
@@ -218,10 +264,20 @@ static struct samsung_mux_clock top0_mux_clks[] __initdata = {
        MUX(0, "mout_aclk_peric1_66", mout_top0_group1, MUX_SEL_TOP03, 12, 2),
        MUX(0, "mout_aclk_peric0_66", mout_top0_group1, MUX_SEL_TOP03, 20, 2),
 
+       MUX(0, "mout_sclk_spdif", mout_top0_group3, MUX_SEL_TOP0_PERIC0, 4, 3),
+       MUX(0, "mout_sclk_pcm1", mout_top0_group4, MUX_SEL_TOP0_PERIC0, 8, 2),
+       MUX(0, "mout_sclk_i2s1", mout_top0_group4, MUX_SEL_TOP0_PERIC0, 20, 2),
+
+       MUX(0, "mout_sclk_spi1", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 8, 2),
+       MUX(0, "mout_sclk_spi0", mout_top0_group1, MUX_SEL_TOP0_PERIC1, 20, 2),
+
+       MUX(0, "mout_sclk_spi3", mout_top0_group1, MUX_SEL_TOP0_PERIC2, 8, 2),
+       MUX(0, "mout_sclk_spi2", mout_top0_group1, MUX_SEL_TOP0_PERIC2, 20, 2),
        MUX(0, "mout_sclk_uart3", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 4, 2),
        MUX(0, "mout_sclk_uart2", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 8, 2),
        MUX(0, "mout_sclk_uart1", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 12, 2),
        MUX(0, "mout_sclk_uart0", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 16, 2),
+       MUX(0, "mout_sclk_spi4", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 20, 2),
 };
 
 static struct samsung_div_clock top0_div_clks[] __initdata = {
@@ -230,13 +286,40 @@ static struct samsung_div_clock top0_div_clks[] __initdata = {
        DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66",
                DIV_TOP03, 20, 6),
 
+       DIV(0, "dout_sclk_spdif", "mout_sclk_spdif", DIV_TOP0_PERIC0, 4, 4),
+       DIV(0, "dout_sclk_pcm1", "mout_sclk_pcm1", DIV_TOP0_PERIC0, 8, 12),
+       DIV(0, "dout_sclk_i2s1", "mout_sclk_i2s1", DIV_TOP0_PERIC0, 20, 10),
+
+       DIV(0, "dout_sclk_spi1", "mout_sclk_spi1", DIV_TOP0_PERIC1, 8, 12),
+       DIV(0, "dout_sclk_spi0", "mout_sclk_spi0", DIV_TOP0_PERIC1, 20, 12),
+
+       DIV(0, "dout_sclk_spi3", "mout_sclk_spi3", DIV_TOP0_PERIC2, 8, 12),
+       DIV(0, "dout_sclk_spi2", "mout_sclk_spi2", DIV_TOP0_PERIC2, 20, 12),
+
        DIV(0, "dout_sclk_uart3", "mout_sclk_uart3", DIV_TOP0_PERIC3, 4, 4),
        DIV(0, "dout_sclk_uart2", "mout_sclk_uart2", DIV_TOP0_PERIC3, 8, 4),
        DIV(0, "dout_sclk_uart1", "mout_sclk_uart1", DIV_TOP0_PERIC3, 12, 4),
        DIV(0, "dout_sclk_uart0", "mout_sclk_uart0", DIV_TOP0_PERIC3, 16, 4),
+       DIV(0, "dout_sclk_spi4", "mout_sclk_spi4", DIV_TOP0_PERIC3, 20, 12),
 };
 
 static struct samsung_gate_clock top0_gate_clks[] __initdata = {
+       GATE(CLK_SCLK_SPDIF, "sclk_spdif", "dout_sclk_spdif",
+               ENABLE_SCLK_TOP0_PERIC0, 4, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_PCM1, "sclk_pcm1", "dout_sclk_pcm1",
+               ENABLE_SCLK_TOP0_PERIC0, 8, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_I2S1, "sclk_i2s1", "dout_sclk_i2s1",
+               ENABLE_SCLK_TOP0_PERIC0, 20, CLK_SET_RATE_PARENT, 0),
+
+       GATE(CLK_SCLK_SPI1, "sclk_spi1", "dout_sclk_spi1",
+               ENABLE_SCLK_TOP0_PERIC1, 8, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_SPI0, "sclk_spi0", "dout_sclk_spi0",
+               ENABLE_SCLK_TOP0_PERIC1, 20, CLK_SET_RATE_PARENT, 0),
+
+       GATE(CLK_SCLK_SPI3, "sclk_spi3", "dout_sclk_spi3",
+               ENABLE_SCLK_TOP0_PERIC2, 8, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_SPI2, "sclk_spi2", "dout_sclk_spi2",
+               ENABLE_SCLK_TOP0_PERIC2, 20, CLK_SET_RATE_PARENT, 0),
        GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_sclk_uart3",
                ENABLE_SCLK_TOP0_PERIC3, 4, 0, 0),
        GATE(CLK_SCLK_UART2, "sclk_uart2", "dout_sclk_uart2",
@@ -245,6 +328,8 @@ static struct samsung_gate_clock top0_gate_clks[] __initdata = {
                ENABLE_SCLK_TOP0_PERIC3, 12, 0, 0),
        GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_sclk_uart0",
                ENABLE_SCLK_TOP0_PERIC3, 16, 0, 0),
+       GATE(CLK_SCLK_SPI4, "sclk_spi4", "dout_sclk_spi4",
+               ENABLE_SCLK_TOP0_PERIC3, 20, CLK_SET_RATE_PARENT, 0),
 };
 
 static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = {
@@ -343,6 +428,8 @@ static struct samsung_mux_clock top1_mux_clks[] __initdata = {
        MUX(0, "mout_aclk_fsys0_200", mout_top1_group1, MUX_SEL_TOP13, 28, 2),
 
        MUX(0, "mout_sclk_mmc2", mout_top1_group1, MUX_SEL_TOP1_FSYS0, 24, 2),
+       MUX(0, "mout_sclk_usbdrd300", mout_top1_group1,
+               MUX_SEL_TOP1_FSYS0, 28, 2),
 
        MUX(0, "mout_sclk_mmc1", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 24, 2),
        MUX(0, "mout_sclk_mmc0", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 28, 2),
@@ -356,6 +443,8 @@ static struct samsung_div_clock top1_div_clks[] __initdata = {
 
        DIV(DOUT_SCLK_MMC2, "dout_sclk_mmc2", "mout_sclk_mmc2",
                DIV_TOP1_FSYS0, 24, 4),
+       DIV(0, "dout_sclk_usbdrd300", "mout_sclk_usbdrd300",
+               DIV_TOP1_FSYS0, 28, 4),
 
        DIV(DOUT_SCLK_MMC1, "dout_sclk_mmc1", "mout_sclk_mmc1",
                DIV_TOP1_FSYS1, 24, 4),
@@ -366,6 +455,8 @@ static struct samsung_div_clock top1_div_clks[] __initdata = {
 static struct samsung_gate_clock top1_gate_clks[] __initdata = {
        GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2",
                ENABLE_SCLK_TOP1_FSYS0, 24, CLK_SET_RATE_PARENT, 0),
+       GATE(0, "sclk_usbdrd300", "dout_sclk_usbdrd300",
+               ENABLE_SCLK_TOP1_FSYS0, 28, 0, 0),
 
        GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_sclk_mmc1",
                ENABLE_SCLK_TOP1_FSYS1, 24, CLK_SET_RATE_PARENT, 0),
@@ -514,6 +605,7 @@ static void __init exynos7_clk_peric0_init(struct device_node *np)
 /* Register Offset definitions for CMU_PERIC1 (0x14C80000) */
 #define MUX_SEL_PERIC10                        0x0200
 #define MUX_SEL_PERIC11                        0x0204
+#define MUX_SEL_PERIC12                        0x0208
 #define ENABLE_PCLK_PERIC1             0x0900
 #define ENABLE_SCLK_PERIC10            0x0A00
 
@@ -525,10 +617,16 @@ PNAME(mout_aclk_peric1_66_p)      = { "fin_pll", "dout_aclk_peric1_66" };
 PNAME(mout_sclk_uart1_p)       = { "fin_pll", "sclk_uart1" };
 PNAME(mout_sclk_uart2_p)       = { "fin_pll", "sclk_uart2" };
 PNAME(mout_sclk_uart3_p)       = { "fin_pll", "sclk_uart3" };
+PNAME(mout_sclk_spi0_p)                = { "fin_pll", "sclk_spi0" };
+PNAME(mout_sclk_spi1_p)                = { "fin_pll", "sclk_spi1" };
+PNAME(mout_sclk_spi2_p)                = { "fin_pll", "sclk_spi2" };
+PNAME(mout_sclk_spi3_p)                = { "fin_pll", "sclk_spi3" };
+PNAME(mout_sclk_spi4_p)                = { "fin_pll", "sclk_spi4" };
 
 static unsigned long peric1_clk_regs[] __initdata = {
        MUX_SEL_PERIC10,
        MUX_SEL_PERIC11,
+       MUX_SEL_PERIC12,
        ENABLE_PCLK_PERIC1,
        ENABLE_SCLK_PERIC10,
 };
@@ -537,6 +635,16 @@ static struct samsung_mux_clock peric1_mux_clks[] __initdata = {
        MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_p,
                MUX_SEL_PERIC10, 0, 1),
 
+       MUX_F(0, "mout_sclk_spi0_user", mout_sclk_spi0_p,
+               MUX_SEL_PERIC11, 0, 1, CLK_SET_RATE_PARENT, 0),
+       MUX_F(0, "mout_sclk_spi1_user", mout_sclk_spi1_p,
+               MUX_SEL_PERIC11, 4, 1, CLK_SET_RATE_PARENT, 0),
+       MUX_F(0, "mout_sclk_spi2_user", mout_sclk_spi2_p,
+               MUX_SEL_PERIC11, 8, 1, CLK_SET_RATE_PARENT, 0),
+       MUX_F(0, "mout_sclk_spi3_user", mout_sclk_spi3_p,
+               MUX_SEL_PERIC11, 12, 1, CLK_SET_RATE_PARENT, 0),
+       MUX_F(0, "mout_sclk_spi4_user", mout_sclk_spi4_p,
+               MUX_SEL_PERIC11, 16, 1, CLK_SET_RATE_PARENT, 0),
        MUX(0, "mout_sclk_uart1_user", mout_sclk_uart1_p,
                MUX_SEL_PERIC11, 20, 1),
        MUX(0, "mout_sclk_uart2_user", mout_sclk_uart2_p,
@@ -562,6 +670,22 @@ static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
                ENABLE_PCLK_PERIC1, 10, 0, 0),
        GATE(PCLK_UART3, "pclk_uart3", "mout_aclk_peric1_66_user",
                ENABLE_PCLK_PERIC1, 11, 0, 0),
+       GATE(PCLK_SPI0, "pclk_spi0", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 12, 0, 0),
+       GATE(PCLK_SPI1, "pclk_spi1", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 13, 0, 0),
+       GATE(PCLK_SPI2, "pclk_spi2", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 14, 0, 0),
+       GATE(PCLK_SPI3, "pclk_spi3", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 15, 0, 0),
+       GATE(PCLK_SPI4, "pclk_spi4", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 16, 0, 0),
+       GATE(PCLK_I2S1, "pclk_i2s1", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 17, CLK_SET_RATE_PARENT, 0),
+       GATE(PCLK_PCM1, "pclk_pcm1", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 18, 0, 0),
+       GATE(PCLK_SPDIF, "pclk_spdif", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 19, 0, 0),
 
        GATE(SCLK_UART1, "sclk_uart1_user", "mout_sclk_uart1_user",
                ENABLE_SCLK_PERIC10, 9, 0, 0),
@@ -569,6 +693,22 @@ static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
                ENABLE_SCLK_PERIC10, 10, 0, 0),
        GATE(SCLK_UART3, "sclk_uart3_user", "mout_sclk_uart3_user",
                ENABLE_SCLK_PERIC10, 11, 0, 0),
+       GATE(SCLK_SPI0, "sclk_spi0_user", "mout_sclk_spi0_user",
+               ENABLE_SCLK_PERIC10, 12, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_SPI1, "sclk_spi1_user", "mout_sclk_spi1_user",
+               ENABLE_SCLK_PERIC10, 13, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_SPI2, "sclk_spi2_user", "mout_sclk_spi2_user",
+               ENABLE_SCLK_PERIC10, 14, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_SPI3, "sclk_spi3_user", "mout_sclk_spi3_user",
+               ENABLE_SCLK_PERIC10, 15, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_SPI4, "sclk_spi4_user", "mout_sclk_spi4_user",
+               ENABLE_SCLK_PERIC10, 16, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_I2S1, "sclk_i2s1_user", "sclk_i2s1",
+               ENABLE_SCLK_PERIC10, 17, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_PCM1, "sclk_pcm1_user", "sclk_pcm1",
+               ENABLE_SCLK_PERIC10, 18, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_SPDIF, "sclk_spdif_user", "sclk_spdif",
+               ENABLE_SCLK_PERIC10, 19, CLK_SET_RATE_PARENT, 0),
 };
 
 static struct samsung_cmu_info peric1_cmu_info __initdata = {
@@ -647,7 +787,12 @@ CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris",
 /* Register Offset definitions for CMU_FSYS0 (0x10E90000) */
 #define MUX_SEL_FSYS00                 0x0200
 #define MUX_SEL_FSYS01                 0x0204
+#define MUX_SEL_FSYS02                 0x0208
+#define ENABLE_ACLK_FSYS00             0x0800
 #define ENABLE_ACLK_FSYS01             0x0804
+#define ENABLE_SCLK_FSYS01             0x0A04
+#define ENABLE_SCLK_FSYS02             0x0A08
+#define ENABLE_SCLK_FSYS04             0x0A10
 
 /*
  * List of parent clocks for Muxes in CMU_FSYS0
@@ -655,10 +800,29 @@ CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris",
 PNAME(mout_aclk_fsys0_200_p)   = { "fin_pll", "dout_aclk_fsys0_200" };
 PNAME(mout_sclk_mmc2_p)                = { "fin_pll", "sclk_mmc2" };
 
+PNAME(mout_sclk_usbdrd300_p)   = { "fin_pll", "sclk_usbdrd300" };
+PNAME(mout_phyclk_usbdrd300_udrd30_phyclk_p)   = { "fin_pll",
+                               "phyclk_usbdrd300_udrd30_phyclock" };
+PNAME(mout_phyclk_usbdrd300_udrd30_pipe_pclk_p)        = { "fin_pll",
+                               "phyclk_usbdrd300_udrd30_pipe_pclk" };
+
+/* fixed rate clocks used in the FSYS0 block */
+struct samsung_fixed_rate_clock fixed_rate_clks_fsys0[] __initdata = {
+       FRATE(0, "phyclk_usbdrd300_udrd30_phyclock", NULL,
+               CLK_IS_ROOT, 60000000),
+       FRATE(0, "phyclk_usbdrd300_udrd30_pipe_pclk", NULL,
+               CLK_IS_ROOT, 125000000),
+};
+
 static unsigned long fsys0_clk_regs[] __initdata = {
        MUX_SEL_FSYS00,
        MUX_SEL_FSYS01,
+       MUX_SEL_FSYS02,
+       ENABLE_ACLK_FSYS00,
        ENABLE_ACLK_FSYS01,
+       ENABLE_SCLK_FSYS01,
+       ENABLE_SCLK_FSYS02,
+       ENABLE_SCLK_FSYS04,
 };
 
 static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
@@ -666,11 +830,49 @@ static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
                MUX_SEL_FSYS00, 24, 1),
 
        MUX(0, "mout_sclk_mmc2_user", mout_sclk_mmc2_p, MUX_SEL_FSYS01, 24, 1),
+       MUX(0, "mout_sclk_usbdrd300_user", mout_sclk_usbdrd300_p,
+               MUX_SEL_FSYS01, 28, 1),
+
+       MUX(0, "mout_phyclk_usbdrd300_udrd30_pipe_pclk_user",
+               mout_phyclk_usbdrd300_udrd30_pipe_pclk_p,
+               MUX_SEL_FSYS02, 24, 1),
+       MUX(0, "mout_phyclk_usbdrd300_udrd30_phyclk_user",
+               mout_phyclk_usbdrd300_udrd30_phyclk_p,
+               MUX_SEL_FSYS02, 28, 1),
 };
 
 static struct samsung_gate_clock fsys0_gate_clks[] __initdata = {
+       GATE(ACLK_AXIUS_USBDRD30X_FSYS0X, "aclk_axius_usbdrd30x_fsys0x",
+               "mout_aclk_fsys0_200_user",
+               ENABLE_ACLK_FSYS00, 19, 0, 0),
+       GATE(ACLK_PDMA1, "aclk_pdma1", "mout_aclk_fsys0_200_user",
+                       ENABLE_ACLK_FSYS00, 3, 0, 0),
+       GATE(ACLK_PDMA0, "aclk_pdma0", "mout_aclk_fsys0_200_user",
+                       ENABLE_ACLK_FSYS00, 4, 0, 0),
+
+       GATE(ACLK_USBDRD300, "aclk_usbdrd300", "mout_aclk_fsys0_200_user",
+               ENABLE_ACLK_FSYS01, 29, 0, 0),
        GATE(ACLK_MMC2, "aclk_mmc2", "mout_aclk_fsys0_200_user",
                ENABLE_ACLK_FSYS01, 31, 0, 0),
+
+       GATE(SCLK_USBDRD300_SUSPENDCLK, "sclk_usbdrd300_suspendclk",
+               "mout_sclk_usbdrd300_user",
+               ENABLE_SCLK_FSYS01, 4, 0, 0),
+       GATE(SCLK_USBDRD300_REFCLK, "sclk_usbdrd300_refclk", "fin_pll",
+               ENABLE_SCLK_FSYS01, 8, 0, 0),
+
+       GATE(PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER,
+               "phyclk_usbdrd300_udrd30_pipe_pclk_user",
+               "mout_phyclk_usbdrd300_udrd30_pipe_pclk_user",
+               ENABLE_SCLK_FSYS02, 24, 0, 0),
+       GATE(PHYCLK_USBDRD300_UDRD30_PHYCLK_USER,
+               "phyclk_usbdrd300_udrd30_phyclk_user",
+               "mout_phyclk_usbdrd300_udrd30_phyclk_user",
+               ENABLE_SCLK_FSYS02, 28, 0, 0),
+
+       GATE(OSCCLK_PHY_CLKOUT_USB30_PHY, "oscclk_phy_clkout_usb30_phy",
+               "fin_pll",
+               ENABLE_SCLK_FSYS04, 28, 0, 0),
 };
 
 static struct samsung_cmu_info fsys0_cmu_info __initdata = {
@@ -741,3 +943,205 @@ static void __init exynos7_clk_fsys1_init(struct device_node *np)
 
 CLK_OF_DECLARE(exynos7_clk_fsys1, "samsung,exynos7-clock-fsys1",
        exynos7_clk_fsys1_init);
+
+#define MUX_SEL_MSCL                   0x0200
+#define DIV_MSCL                       0x0600
+#define ENABLE_ACLK_MSCL               0x0800
+#define ENABLE_PCLK_MSCL               0x0900
+
+/* List of parent clocks for Muxes in CMU_MSCL */
+PNAME(mout_aclk_mscl_532_user_p)       = { "fin_pll", "aclk_mscl_532" };
+
+static unsigned long mscl_clk_regs[] __initdata = {
+       MUX_SEL_MSCL,
+       DIV_MSCL,
+       ENABLE_ACLK_MSCL,
+       ENABLE_PCLK_MSCL,
+};
+
+static struct samsung_mux_clock mscl_mux_clks[] __initdata = {
+       MUX(USERMUX_ACLK_MSCL_532, "usermux_aclk_mscl_532",
+               mout_aclk_mscl_532_user_p, MUX_SEL_MSCL, 0, 1),
+};
+static struct samsung_div_clock mscl_div_clks[] __initdata = {
+       DIV(DOUT_PCLK_MSCL, "dout_pclk_mscl", "usermux_aclk_mscl_532",
+                       DIV_MSCL, 0, 3),
+};
+static struct samsung_gate_clock mscl_gate_clks[] __initdata = {
+
+       GATE(ACLK_MSCL_0, "aclk_mscl_0", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 31, 0, 0),
+       GATE(ACLK_MSCL_1, "aclk_mscl_1", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 30, 0, 0),
+       GATE(ACLK_JPEG, "aclk_jpeg", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 29, 0, 0),
+       GATE(ACLK_G2D, "aclk_g2d", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 28, 0, 0),
+       GATE(ACLK_LH_ASYNC_SI_MSCL_0, "aclk_lh_async_si_mscl_0",
+                       "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 27, 0, 0),
+       GATE(ACLK_LH_ASYNC_SI_MSCL_1, "aclk_lh_async_si_mscl_1",
+                       "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 26, 0, 0),
+       GATE(ACLK_XIU_MSCLX_0, "aclk_xiu_msclx_0", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 25, 0, 0),
+       GATE(ACLK_XIU_MSCLX_1, "aclk_xiu_msclx_1", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 24, 0, 0),
+       GATE(ACLK_AXI2ACEL_BRIDGE, "aclk_axi2acel_bridge",
+                       "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 23, 0, 0),
+       GATE(ACLK_QE_MSCL_0, "aclk_qe_mscl_0", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 22, 0, 0),
+       GATE(ACLK_QE_MSCL_1, "aclk_qe_mscl_1", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 21, 0, 0),
+       GATE(ACLK_QE_JPEG, "aclk_qe_jpeg", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 20, 0, 0),
+       GATE(ACLK_QE_G2D, "aclk_qe_g2d", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 19, 0, 0),
+       GATE(ACLK_PPMU_MSCL_0, "aclk_ppmu_mscl_0", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 18, 0, 0),
+       GATE(ACLK_PPMU_MSCL_1, "aclk_ppmu_mscl_1", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 17, 0, 0),
+       GATE(ACLK_MSCLNP_133, "aclk_msclnp_133", "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 16, 0, 0),
+       GATE(ACLK_AHB2APB_MSCL0P, "aclk_ahb2apb_mscl0p",
+                       "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 15, 0, 0),
+       GATE(ACLK_AHB2APB_MSCL1P, "aclk_ahb2apb_mscl1p",
+                       "usermux_aclk_mscl_532",
+                       ENABLE_ACLK_MSCL, 14, 0, 0),
+
+       GATE(PCLK_MSCL_0, "pclk_mscl_0", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 31, 0, 0),
+       GATE(PCLK_MSCL_1, "pclk_mscl_1", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 30, 0, 0),
+       GATE(PCLK_JPEG, "pclk_jpeg", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 29, 0, 0),
+       GATE(PCLK_G2D, "pclk_g2d", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 28, 0, 0),
+       GATE(PCLK_QE_MSCL_0, "pclk_qe_mscl_0", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 27, 0, 0),
+       GATE(PCLK_QE_MSCL_1, "pclk_qe_mscl_1", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 26, 0, 0),
+       GATE(PCLK_QE_JPEG, "pclk_qe_jpeg", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 25, 0, 0),
+       GATE(PCLK_QE_G2D, "pclk_qe_g2d", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 24, 0, 0),
+       GATE(PCLK_PPMU_MSCL_0, "pclk_ppmu_mscl_0", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 23, 0, 0),
+       GATE(PCLK_PPMU_MSCL_1, "pclk_ppmu_mscl_1", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 22, 0, 0),
+       GATE(PCLK_AXI2ACEL_BRIDGE, "pclk_axi2acel_bridge", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 21, 0, 0),
+       GATE(PCLK_PMU_MSCL, "pclk_pmu_mscl", "dout_pclk_mscl",
+                       ENABLE_PCLK_MSCL, 20, 0, 0),
+};
+
+static struct samsung_cmu_info mscl_cmu_info __initdata = {
+       .mux_clks               = mscl_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(mscl_mux_clks),
+       .div_clks               = mscl_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(mscl_div_clks),
+       .gate_clks              = mscl_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(mscl_gate_clks),
+       .nr_clk_ids             = MSCL_NR_CLK,
+       .clk_regs               = mscl_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(mscl_clk_regs),
+};
+
+static void __init exynos7_clk_mscl_init(struct device_node *np)
+{
+       samsung_cmu_register_one(np, &mscl_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_mscl, "samsung,exynos7-clock-mscl",
+               exynos7_clk_mscl_init);
+
+/* Register Offset definitions for CMU_AUD (0x114C0000) */
+#define        MUX_SEL_AUD                     0x0200
+#define        DIV_AUD0                        0x0600
+#define        DIV_AUD1                        0x0604
+#define        ENABLE_ACLK_AUD                 0x0800
+#define        ENABLE_PCLK_AUD                 0x0900
+#define        ENABLE_SCLK_AUD                 0x0A00
+
+/*
+ * List of parent clocks for Muxes in CMU_AUD
+ */
+PNAME(mout_aud_pll_user_p) = { "fin_pll", "fout_aud_pll" };
+PNAME(mout_aud_group_p) = { "dout_aud_cdclk", "ioclk_audiocdclk0" };
+
+static unsigned long aud_clk_regs[] __initdata = {
+       MUX_SEL_AUD,
+       DIV_AUD0,
+       DIV_AUD1,
+       ENABLE_ACLK_AUD,
+       ENABLE_PCLK_AUD,
+       ENABLE_SCLK_AUD,
+};
+
+static struct samsung_mux_clock aud_mux_clks[] __initdata = {
+       MUX(0, "mout_sclk_i2s", mout_aud_group_p, MUX_SEL_AUD, 12, 1),
+       MUX(0, "mout_sclk_pcm", mout_aud_group_p, MUX_SEL_AUD, 16, 1),
+       MUX(0, "mout_aud_pll_user", mout_aud_pll_user_p, MUX_SEL_AUD, 20, 1),
+};
+
+static struct samsung_div_clock aud_div_clks[] __initdata = {
+       DIV(0, "dout_aud_ca5", "mout_aud_pll_user", DIV_AUD0, 0, 4),
+       DIV(0, "dout_aclk_aud", "dout_aud_ca5", DIV_AUD0, 4, 4),
+       DIV(0, "dout_aud_pclk_dbg", "dout_aud_ca5", DIV_AUD0, 8, 4),
+
+       DIV(0, "dout_sclk_i2s", "mout_sclk_i2s", DIV_AUD1, 0, 4),
+       DIV(0, "dout_sclk_pcm", "mout_sclk_pcm", DIV_AUD1, 4, 8),
+       DIV(0, "dout_sclk_uart", "dout_aud_cdclk", DIV_AUD1, 12, 4),
+       DIV(0, "dout_sclk_slimbus", "dout_aud_cdclk", DIV_AUD1, 16, 5),
+       DIV(0, "dout_aud_cdclk", "mout_aud_pll_user", DIV_AUD1, 24, 4),
+};
+
+static struct samsung_gate_clock aud_gate_clks[] __initdata = {
+       GATE(SCLK_PCM, "sclk_pcm", "dout_sclk_pcm",
+                       ENABLE_SCLK_AUD, 27, CLK_SET_RATE_PARENT, 0),
+       GATE(SCLK_I2S, "sclk_i2s", "dout_sclk_i2s",
+                       ENABLE_SCLK_AUD, 28, CLK_SET_RATE_PARENT, 0),
+       GATE(0, "sclk_uart", "dout_sclk_uart", ENABLE_SCLK_AUD, 29, 0, 0),
+       GATE(0, "sclk_slimbus", "dout_sclk_slimbus",
+                       ENABLE_SCLK_AUD, 30, 0, 0),
+
+       GATE(0, "pclk_dbg_aud", "dout_aud_pclk_dbg", ENABLE_PCLK_AUD, 19, 0, 0),
+       GATE(0, "pclk_gpio_aud", "dout_aclk_aud", ENABLE_PCLK_AUD, 20, 0, 0),
+       GATE(0, "pclk_wdt1", "dout_aclk_aud", ENABLE_PCLK_AUD, 22, 0, 0),
+       GATE(0, "pclk_wdt0", "dout_aclk_aud", ENABLE_PCLK_AUD, 23, 0, 0),
+       GATE(0, "pclk_slimbus", "dout_aclk_aud", ENABLE_PCLK_AUD, 24, 0, 0),
+       GATE(0, "pclk_uart", "dout_aclk_aud", ENABLE_PCLK_AUD, 25, 0, 0),
+       GATE(PCLK_PCM, "pclk_pcm", "dout_aclk_aud",
+                       ENABLE_PCLK_AUD, 26, CLK_SET_RATE_PARENT, 0),
+       GATE(PCLK_I2S, "pclk_i2s", "dout_aclk_aud",
+                       ENABLE_PCLK_AUD, 27, CLK_SET_RATE_PARENT, 0),
+       GATE(0, "pclk_timer", "dout_aclk_aud", ENABLE_PCLK_AUD, 28, 0, 0),
+       GATE(0, "pclk_smmu_aud", "dout_aclk_aud", ENABLE_PCLK_AUD, 31, 0, 0),
+
+       GATE(0, "aclk_smmu_aud", "dout_aclk_aud", ENABLE_ACLK_AUD, 27, 0, 0),
+       GATE(0, "aclk_acel_lh_async_si_top", "dout_aclk_aud",
+                        ENABLE_ACLK_AUD, 28, 0, 0),
+       GATE(ACLK_ADMA, "aclk_dmac", "dout_aclk_aud", ENABLE_ACLK_AUD, 31, 0, 0),
+};
+
+static struct samsung_cmu_info aud_cmu_info __initdata = {
+       .mux_clks               = aud_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(aud_mux_clks),
+       .div_clks               = aud_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(aud_div_clks),
+       .gate_clks              = aud_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(aud_gate_clks),
+       .nr_clk_ids             = AUD_NR_CLK,
+       .clk_regs               = aud_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(aud_clk_regs),
+};
+
+static void __init exynos7_clk_aud_init(struct device_node *np)
+{
+       samsung_cmu_register_one(np, &aud_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_aud, "samsung,exynos7-clock-aud",
+               exynos7_clk_aud_init);
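
Taken together, the new CMU_AUD tables describe a small tree rooted in the AUD PLL that CMU_TOPC now exposes as FOUT_AUD_PLL. One branch, read directly from the mux/div/gate entries above (register bit positions omitted):

	fin_pll / fout_aud_pll -> mout_aud_pll_user
	    mout_aud_pll_user -> dout_aud_cdclk -> mout_sclk_i2s -> dout_sclk_i2s -> sclk_i2s
	    mout_aud_pll_user -> dout_aud_ca5 -> dout_aclk_aud -> pclk_i2s, pclk_pcm, aclk_dmac, ...
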
diff --git a/drivers/clk/samsung/clk.c b/drivers/clk/samsung/clk.c
index 4bda54095a16a0f3a1a670d4e949a4267f411041..9e1f88c04fd46dd583e1b2ccd90fc96c8cae06e8 100644
@@ -374,19 +374,24 @@ static void samsung_clk_sleep_init(void __iomem *reg_base,
  * Common function which registers plls, muxes, dividers and gates
  * for each CMU. It also add CMU register list to register cache.
  */
-void __init samsung_cmu_register_one(struct device_node *np,
+struct samsung_clk_provider * __init samsung_cmu_register_one(
+                       struct device_node *np,
                        struct samsung_cmu_info *cmu)
 {
        void __iomem *reg_base;
        struct samsung_clk_provider *ctx;
 
        reg_base = of_iomap(np, 0);
-       if (!reg_base)
+       if (!reg_base) {
                panic("%s: failed to map registers\n", __func__);
+               return NULL;
+       }
 
        ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
-       if (!ctx)
+       if (!ctx) {
                panic("%s: unable to alllocate ctx\n", __func__);
+               return ctx;
+       }
 
        if (cmu->pll_clks)
                samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
@@ -410,4 +415,6 @@ void __init samsung_cmu_register_one(struct device_node *np,
                        cmu->nr_clk_regs);
 
        samsung_clk_of_add_provider(np, ctx);
+
+       return ctx;
 }
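
Since samsung_cmu_register_one() now hands back the provider, a SoC clock driver that needs the mapped registers after registration can take them from the returned context instead of keeping its own static reg_base. A minimal usage sketch, mirroring the exynos3250 conversion earlier in this patch (example_cmu_init, example_post_init and the compatible string are hypothetical names, not part of the patch):

	static void __init example_cmu_init(struct device_node *np)
	{
		struct samsung_clk_provider *ctx;

		ctx = samsung_cmu_register_one(np, &cmu_info);
		if (!ctx)
			return;

		/* post-registration tweaks can reuse the mapped base */
		example_post_init(ctx->reg_base);
	}
	CLK_OF_DECLARE(example_cmu, "vendor,example-cmu", example_cmu_init);
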
diff --git a/drivers/clk/samsung/clk.h b/drivers/clk/samsung/clk.h
index 8acabe1f32c4d4f13264eb26088b237778becf6b..e4c75383cea718c7fb563db70fbdd8452b1f9bd8 100644
@@ -392,7 +392,8 @@ extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
                        struct samsung_pll_clock *pll_list,
                        unsigned int nr_clk, void __iomem *base);
 
-extern void __init samsung_cmu_register_one(struct device_node *,
+extern struct samsung_clk_provider __init *samsung_cmu_register_one(
+                       struct device_node *,
                        struct samsung_cmu_info *);
 
 extern unsigned long _get_rate(const char *clk_name);
diff --git a/drivers/clk/shmobile/Makefile b/drivers/clk/shmobile/Makefile
index f83980f2b9568ffa7df081bf5593518e04bcde0b..0689d7fb2666b1956728d85f9373b9cbd2c600b1 100644
@@ -1,9 +1,11 @@
 obj-$(CONFIG_ARCH_EMEV2)               += clk-emev2.o
 obj-$(CONFIG_ARCH_R7S72100)            += clk-rz.o
+obj-$(CONFIG_ARCH_R8A73A4)             += clk-r8a73a4.o
 obj-$(CONFIG_ARCH_R8A7740)             += clk-r8a7740.o
 obj-$(CONFIG_ARCH_R8A7779)             += clk-r8a7779.o
 obj-$(CONFIG_ARCH_R8A7790)             += clk-rcar-gen2.o
 obj-$(CONFIG_ARCH_R8A7791)             += clk-rcar-gen2.o
+obj-$(CONFIG_ARCH_R8A7793)             += clk-rcar-gen2.o
 obj-$(CONFIG_ARCH_R8A7794)             += clk-rcar-gen2.o
 obj-$(CONFIG_ARCH_SH73A0)              += clk-sh73a0.o
 obj-$(CONFIG_ARCH_SHMOBILE_MULTI)      += clk-div6.o
diff --git a/drivers/clk/shmobile/clk-div6.c b/drivers/clk/shmobile/clk-div6.c
index 639241e31e03ec244907761ef430ed95bc53adde..036a692c72195db93760e40dc1fcb1928b1ecded 100644
@@ -54,12 +54,19 @@ static int cpg_div6_clock_enable(struct clk_hw *hw)
 static void cpg_div6_clock_disable(struct clk_hw *hw)
 {
        struct div6_clock *clock = to_div6_clock(hw);
+       u32 val;
 
-       /* DIV6 clocks require the divisor field to be non-zero when stopping
-        * the clock.
+       val = clk_readl(clock->reg);
+       val |= CPG_DIV6_CKSTP;
+       /*
+        * DIV6 clocks require the divisor field to be non-zero when stopping
+        * the clock. However, some clocks (e.g. ZB on sh73a0) fail to be
+        * re-enabled later if the divisor field is changed when stopping the
+        * clock
         */
-       clk_writel(clk_readl(clock->reg) | CPG_DIV6_CKSTP | CPG_DIV6_DIV_MASK,
-                  clock->reg);
+       if (!(val & CPG_DIV6_DIV_MASK))
+               val |= CPG_DIV6_DIV_MASK;
+       clk_writel(val, clock->reg);
 }
 
 static int cpg_div6_clock_is_enabled(struct clk_hw *hw)
@@ -83,6 +90,9 @@ static unsigned int cpg_div6_clock_calc_div(unsigned long rate,
 {
        unsigned int div;
 
+       if (!rate)
+               rate = 1;
+
        div = DIV_ROUND_CLOSEST(parent_rate, rate);
        return clamp_t(unsigned int, div, 1, 64);
 }
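
Concretely: for a DIV6 register whose divisor field currently reads 0x05, the old disable path wrote CKSTP together with the whole divisor mask, leaving the field at its all-ones value (0x3f, assuming the 6-bit field implied by the 1..64 clamp above); per the new comment, clocks such as ZB on sh73a0 then fail to restart. The new path writes CKSTP while keeping 0x05 in place and only falls back to the all-ones divisor when the field reads zero, since the hardware still requires a non-zero divisor while the clock is stopped. The added rate = 1 guard in cpg_div6_clock_calc_div() likewise keeps DIV_ROUND_CLOSEST() from dividing by zero when a zero rate is requested.
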
diff --git a/drivers/clk/shmobile/clk-r8a73a4.c b/drivers/clk/shmobile/clk-r8a73a4.c
new file mode 100644
index 0000000..29b9a0b
--- /dev/null
+++ b/drivers/clk/shmobile/clk-r8a73a4.c
@@ -0,0 +1,241 @@
+/*
+ * r8a73a4 Core CPG Clocks
+ *
+ * Copyright (C) 2014  Ulrich Hecht
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/clk/shmobile.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+
+struct r8a73a4_cpg {
+       struct clk_onecell_data data;
+       spinlock_t lock;
+       void __iomem *reg;
+};
+
+#define CPG_CKSCR      0xc0
+#define CPG_FRQCRA     0x00
+#define CPG_FRQCRB     0x04
+#define CPG_FRQCRC     0xe0
+#define CPG_PLL0CR     0xd8
+#define CPG_PLL1CR     0x28
+#define CPG_PLL2CR     0x2c
+#define CPG_PLL2HCR    0xe4
+#define CPG_PLL2SCR    0xf4
+
+#define CLK_ENABLE_ON_INIT BIT(0)
+
+struct div4_clk {
+       const char *name;
+       unsigned int reg;
+       unsigned int shift;
+};
+
+static struct div4_clk div4_clks[] = {
+       { "i",  CPG_FRQCRA, 20 },
+       { "m3", CPG_FRQCRA, 12 },
+       { "b",  CPG_FRQCRA,  8 },
+       { "m1", CPG_FRQCRA,  4 },
+       { "m2", CPG_FRQCRA,  0 },
+       { "zx", CPG_FRQCRB, 12 },
+       { "zs", CPG_FRQCRB,  8 },
+       { "hp", CPG_FRQCRB,  4 },
+       { NULL, 0, 0 },
+};
+
+static const struct clk_div_table div4_div_table[] = {
+       { 0, 2 }, { 1, 3 }, { 2, 4 }, { 3, 6 }, { 4, 8 }, { 5, 12 },
+       { 6, 16 }, { 7, 18 }, { 8, 24 }, { 10, 36 }, { 11, 48 },
+       { 12, 10 }, { 0, 0 }
+};
+
+static struct clk * __init
+r8a73a4_cpg_register_clock(struct device_node *np, struct r8a73a4_cpg *cpg,
+                            const char *name)
+{
+       const struct clk_div_table *table = NULL;
+       const char *parent_name;
+       unsigned int shift, reg;
+       unsigned int mult = 1;
+       unsigned int div = 1;
+
+
+       if (!strcmp(name, "main")) {
+               u32 ckscr = clk_readl(cpg->reg + CPG_CKSCR);
+
+               switch ((ckscr >> 28) & 3) {
+               case 0: /* extal1 */
+                       parent_name = of_clk_get_parent_name(np, 0);
+                       break;
+               case 1: /* extal1 / 2 */
+                       parent_name = of_clk_get_parent_name(np, 0);
+                       div = 2;
+                       break;
+               case 2: /* extal2 */
+                       parent_name = of_clk_get_parent_name(np, 1);
+                       break;
+               case 3: /* extal2 / 2 */
+                       parent_name = of_clk_get_parent_name(np, 1);
+                       div = 2;
+                       break;
+               }
+       } else if (!strcmp(name, "pll0")) {
+               /* PLL0/1 are configurable multiplier clocks. Register them as
+                * fixed factor clocks for now as there's no generic multiplier
+                * clock implementation and we currently have no need to change
+                * the multiplier value.
+                */
+               u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+
+               parent_name = "main";
+               mult = ((value >> 24) & 0x7f) + 1;
+               if (value & BIT(20))
+                       div = 2;
+       } else if (!strcmp(name, "pll1")) {
+               u32 value = clk_readl(cpg->reg + CPG_PLL1CR);
+
+               parent_name = "main";
+               /* XXX: enable bit? */
+               mult = ((value >> 24) & 0x7f) + 1;
+               if (value & BIT(7))
+                       div = 2;
+       } else if (!strncmp(name, "pll2", 4)) {
+               u32 value, cr;
+
+               switch (name[4]) {
+               case '\0':
+                       cr = CPG_PLL2CR;
+                       break;
+               case 's':
+                       cr = CPG_PLL2SCR;
+                       break;
+               case 'h':
+                       cr = CPG_PLL2HCR;
+                       break;
+               default:
+                       return ERR_PTR(-EINVAL);
+               }
+               value = clk_readl(cpg->reg + cr);
+               switch ((value >> 5) & 7) {
+               case 0:
+                       parent_name = "main";
+                       div = 2;
+                       break;
+               case 1:
+                       parent_name = "extal2";
+                       div = 2;
+                       break;
+               case 3:
+                       parent_name = "extal2";
+                       div = 4;
+                       break;
+               case 4:
+                       parent_name = "main";
+                       break;
+               case 5:
+                       parent_name = "extal2";
+                       break;
+               default:
+                       pr_warn("%s: unexpected parent of %s\n", __func__,
+                               name);
+                       return ERR_PTR(-EINVAL);
+               }
+               /* XXX: enable bit? */
+               mult = ((value >> 24) & 0x7f) + 1;
+       } else if (!strcmp(name, "z") || !strcmp(name, "z2")) {
+               u32 shift = 8;
+
+               parent_name = "pll0";
+               if (name[1] == '2') {
+                       div = 2;
+                       shift = 0;
+               }
+               div *= 32;
+               mult = 0x20 - ((clk_readl(cpg->reg + CPG_FRQCRC) >> shift)
+                      & 0x1f);
+       } else {
+               struct div4_clk *c;
+
+               for (c = div4_clks; c->name; c++) {
+                       if (!strcmp(name, c->name))
+                               break;
+               }
+               if (!c->name)
+                       return ERR_PTR(-EINVAL);
+
+               parent_name = "pll1";
+               table = div4_div_table;
+               reg = c->reg;
+               shift = c->shift;
+       }
+
+       if (!table) {
+               return clk_register_fixed_factor(NULL, name, parent_name, 0,
+                                                mult, div);
+       } else {
+               return clk_register_divider_table(NULL, name, parent_name, 0,
+                                                 cpg->reg + reg, shift, 4, 0,
+                                                 table, &cpg->lock);
+       }
+}
+
+static void __init r8a73a4_cpg_clocks_init(struct device_node *np)
+{
+       struct r8a73a4_cpg *cpg;
+       struct clk **clks;
+       unsigned int i;
+       int num_clks;
+
+       num_clks = of_property_count_strings(np, "clock-output-names");
+       if (num_clks < 0) {
+               pr_err("%s: failed to count clocks\n", __func__);
+               return;
+       }
+
+       cpg = kzalloc(sizeof(*cpg), GFP_KERNEL);
+       clks = kcalloc(num_clks, sizeof(*clks), GFP_KERNEL);
+       if (cpg == NULL || clks == NULL) {
+               /* We're leaking memory on purpose; there's no point in cleaning
+                * up as the system won't boot anyway.
+                */
+               return;
+       }
+
+       spin_lock_init(&cpg->lock);
+
+       cpg->data.clks = clks;
+       cpg->data.clk_num = num_clks;
+
+       cpg->reg = of_iomap(np, 0);
+       if (WARN_ON(cpg->reg == NULL))
+               return;
+
+       for (i = 0; i < num_clks; ++i) {
+               const char *name;
+               struct clk *clk;
+
+               of_property_read_string_index(np, "clock-output-names", i,
+                                             &name);
+
+               clk = r8a73a4_cpg_register_clock(np, cpg, name);
+               if (IS_ERR(clk))
+                       pr_err("%s: failed to register %s %s clock (%ld)\n",
+                              __func__, np->name, name, PTR_ERR(clk));
+               else
+                       cpg->data.clks[i] = clk;
+       }
+
+       of_clk_add_provider(np, of_clk_src_onecell_get, &cpg->data);
+}
+CLK_OF_DECLARE(r8a73a4_cpg_clks, "renesas,r8a73a4-cpg-clocks",
+              r8a73a4_cpg_clocks_init);
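As a side note on how the new driver derives its PLL rates: PLL0CR/PLL1CR hold the multiplier minus one in bits [30:24], and a single bit (bit 20 for PLL0, bit 7 for PLL1) selects an extra divide-by-two, which is why they can be registered as fixed-factor clocks for now. A standalone sketch of that decoding; the register value and the 26 MHz parent rate are made-up examples.

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t pll0cr = 0x2e100000;    /* made-up CPG_PLL0CR value */
            unsigned int mult = ((pll0cr >> 24) & 0x7f) + 1;   /* bits [30:24] + 1 */
            unsigned int div  = (pll0cr & (1u << 20)) ? 2 : 1; /* bit 20: extra /2 */

            /* assuming a 26 MHz "main" parent, purely for illustration */
            printf("mult=%u div=%u -> %lu Hz\n", mult, div,
                   26000000UL * mult / div);
            return 0;
    }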
index e996425d06a920728cd4f32448c19d08efb5a776..acfb6d7dbd6bc049fe39213d675d5a2602e65d9e 100644 (file)
@@ -33,6 +33,8 @@ struct rcar_gen2_cpg {
 #define CPG_FRQCRC                     0x000000e0
 #define CPG_FRQCRC_ZFC_MASK            (0x1f << 8)
 #define CPG_FRQCRC_ZFC_SHIFT           8
+#define CPG_ADSPCKCR                   0x0000025c
+#define CPG_RCANCKCR                   0x00000270
 
 /* -----------------------------------------------------------------------------
  * Z Clock
@@ -161,6 +163,88 @@ static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg)
        return clk;
 }
 
+static struct clk * __init cpg_rcan_clk_register(struct rcar_gen2_cpg *cpg,
+                                                struct device_node *np)
+{
+       const char *parent_name = of_clk_get_parent_name(np, 1);
+       struct clk_fixed_factor *fixed;
+       struct clk_gate *gate;
+       struct clk *clk;
+
+       fixed = kzalloc(sizeof(*fixed), GFP_KERNEL);
+       if (!fixed)
+               return ERR_PTR(-ENOMEM);
+
+       fixed->mult = 1;
+       fixed->div = 6;
+
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate) {
+               kfree(fixed);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       gate->reg = cpg->reg + CPG_RCANCKCR;
+       gate->bit_idx = 8;
+       gate->flags = CLK_GATE_SET_TO_DISABLE;
+       gate->lock = &cpg->lock;
+
+       clk = clk_register_composite(NULL, "rcan", &parent_name, 1, NULL, NULL,
+                                    &fixed->hw, &clk_fixed_factor_ops,
+                                    &gate->hw, &clk_gate_ops, 0);
+       if (IS_ERR(clk)) {
+               kfree(gate);
+               kfree(fixed);
+       }
+
+       return clk;
+}
+
+/* ADSP divisors */
+static const struct clk_div_table cpg_adsp_div_table[] = {
+       {  1,  3 }, {  2,  4 }, {  3,  6 }, {  4,  8 },
+       {  5, 12 }, {  6, 16 }, {  7, 18 }, {  8, 24 },
+       { 10, 36 }, { 11, 48 }, {  0,  0 },
+};
+
+static struct clk * __init cpg_adsp_clk_register(struct rcar_gen2_cpg *cpg)
+{
+       const char *parent_name = "pll1";
+       struct clk_divider *div;
+       struct clk_gate *gate;
+       struct clk *clk;
+
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div)
+               return ERR_PTR(-ENOMEM);
+
+       div->reg = cpg->reg + CPG_ADSPCKCR;
+       div->width = 4;
+       div->table = cpg_adsp_div_table;
+       div->lock = &cpg->lock;
+
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate) {
+               kfree(div);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       gate->reg = cpg->reg + CPG_ADSPCKCR;
+       gate->bit_idx = 8;
+       gate->flags = CLK_GATE_SET_TO_DISABLE;
+       gate->lock = &cpg->lock;
+
+       clk = clk_register_composite(NULL, "adsp", &parent_name, 1, NULL, NULL,
+                                    &div->hw, &clk_divider_ops,
+                                    &gate->hw, &clk_gate_ops, 0);
+       if (IS_ERR(clk)) {
+               kfree(gate);
+               kfree(div);
+       }
+
+       return clk;
+}
+
 /* -----------------------------------------------------------------------------
  * CPG Clock Data
  */
@@ -263,6 +347,10 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
                shift = 0;
        } else if (!strcmp(name, "z")) {
                return cpg_z_clk_register(cpg);
+       } else if (!strcmp(name, "rcan")) {
+               return cpg_rcan_clk_register(cpg, np);
+       } else if (!strcmp(name, "adsp")) {
+               return cpg_adsp_clk_register(cpg);
        } else {
                return ERR_PTR(-EINVAL);
        }
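Both new clocks above use gates with CLK_GATE_SET_TO_DISABLE on bit 8 of their control register, i.e. setting the bit stops the clock. A tiny sketch of that inverted gate polarity; the register values are illustrative only.

    #include <stdio.h>
    #include <stdint.h>

    /* with CLK_GATE_SET_TO_DISABLE, "enabled" means the stop bit is clear */
    static int gate_is_enabled(uint32_t reg, unsigned int bit_idx)
    {
            return !(reg & (1u << bit_idx));
    }

    int main(void)
    {
            printf("%d\n", gate_is_enabled(0x00000000, 8));  /* 1: clock running */
            printf("%d\n", gate_is_enabled(0x00000100, 8));  /* 0: clock stopped */
            return 0;
    }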
index 2282cef9f2ffb0d834b84be7b2b00c32190bde15..bf12a25eb3a22aab048f04d5280195d21bd7dd01 100644 (file)
@@ -37,8 +37,8 @@ static int flexgen_enable(struct clk_hw *hw)
        struct clk_hw *pgate_hw = &flexgen->pgate.hw;
        struct clk_hw *fgate_hw = &flexgen->fgate.hw;
 
-       pgate_hw->clk = hw->clk;
-       fgate_hw->clk = hw->clk;
+       __clk_hw_set_clk(pgate_hw, hw);
+       __clk_hw_set_clk(fgate_hw, hw);
 
        clk_gate_ops.enable(pgate_hw);
 
@@ -54,7 +54,7 @@ static void flexgen_disable(struct clk_hw *hw)
        struct clk_hw *fgate_hw = &flexgen->fgate.hw;
 
        /* disable only the final gate */
-       fgate_hw->clk = hw->clk;
+       __clk_hw_set_clk(fgate_hw, hw);
 
        clk_gate_ops.disable(fgate_hw);
 
@@ -66,7 +66,7 @@ static int flexgen_is_enabled(struct clk_hw *hw)
        struct flexgen *flexgen = to_flexgen(hw);
        struct clk_hw *fgate_hw = &flexgen->fgate.hw;
 
-       fgate_hw->clk = hw->clk;
+       __clk_hw_set_clk(fgate_hw, hw);
 
        if (!clk_gate_ops.is_enabled(fgate_hw))
                return 0;
@@ -79,7 +79,7 @@ static u8 flexgen_get_parent(struct clk_hw *hw)
        struct flexgen *flexgen = to_flexgen(hw);
        struct clk_hw *mux_hw = &flexgen->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return clk_mux_ops.get_parent(mux_hw);
 }
@@ -89,7 +89,7 @@ static int flexgen_set_parent(struct clk_hw *hw, u8 index)
        struct flexgen *flexgen = to_flexgen(hw);
        struct clk_hw *mux_hw = &flexgen->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return clk_mux_ops.set_parent(mux_hw, index);
 }
@@ -124,8 +124,8 @@ unsigned long flexgen_recalc_rate(struct clk_hw *hw,
        struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
        unsigned long mid_rate;
 
-       pdiv_hw->clk = hw->clk;
-       fdiv_hw->clk = hw->clk;
+       __clk_hw_set_clk(pdiv_hw, hw);
+       __clk_hw_set_clk(fdiv_hw, hw);
 
        mid_rate = clk_divider_ops.recalc_rate(pdiv_hw, parent_rate);
 
@@ -138,16 +138,27 @@ static int flexgen_set_rate(struct clk_hw *hw, unsigned long rate,
        struct flexgen *flexgen = to_flexgen(hw);
        struct clk_hw *pdiv_hw = &flexgen->pdiv.hw;
        struct clk_hw *fdiv_hw = &flexgen->fdiv.hw;
-       unsigned long primary_div = 0;
+       unsigned long div = 0;
        int ret = 0;
 
-       pdiv_hw->clk = hw->clk;
-       fdiv_hw->clk = hw->clk;
+       __clk_hw_set_clk(pdiv_hw, hw);
+       __clk_hw_set_clk(fdiv_hw, hw);
 
-       primary_div = clk_best_div(parent_rate, rate);
+       div = clk_best_div(parent_rate, rate);
 
-       clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate);
-       ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * primary_div);
+       /*
+        * pdiv is mainly targeted at low frequency results, while fdiv
+        * should be used for div <= 64. Doing it the other way round can
+        * lead to 'duty cycle' issues.
+        */
+
+       if (div <= 64) {
+               clk_divider_ops.set_rate(pdiv_hw, parent_rate, parent_rate);
+               ret = clk_divider_ops.set_rate(fdiv_hw, rate, rate * div);
+       } else {
+               clk_divider_ops.set_rate(fdiv_hw, parent_rate, parent_rate);
+               ret = clk_divider_ops.set_rate(pdiv_hw, rate, rate * div);
+       }
 
        return ret;
 }
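The new rule above programs the whole division ratio on the final divider only while it still fits in 64, and on the pre-divider otherwise. A rough sketch of that decision, using plain integer division as a stand-in for clk_best_div() and made-up rates:

    #include <stdio.h>

    int main(void)
    {
            unsigned long parent_rate = 600000000;
            unsigned long rate = 4000000;
            unsigned long div = parent_rate / rate;   /* stand-in for clk_best_div() */

            if (div <= 64)
                    printf("fdiv=%lu, pdiv=1\n", div);  /* small ratio on the final divider */
            else
                    printf("pdiv=%lu, fdiv=1\n", div);  /* large ratio on the pre-divider */
            return 0;
    }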
index 79dc40b5cc688fb1a028ca8407cd3feb929ca873..9a15ec344a85900ea68029c730c02ad01ede5335 100644 (file)
@@ -94,7 +94,7 @@ static int clkgena_divmux_enable(struct clk_hw *hw)
        unsigned long timeout;
        int ret = 0;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        ret = clk_mux_ops.set_parent(mux_hw, genamux->muxsel);
        if (ret)
@@ -116,7 +116,7 @@ static void clkgena_divmux_disable(struct clk_hw *hw)
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *mux_hw = &genamux->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        clk_mux_ops.set_parent(mux_hw, CKGAX_CLKOPSRC_SWITCH_OFF);
 }
@@ -126,7 +126,7 @@ static int clkgena_divmux_is_enabled(struct clk_hw *hw)
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *mux_hw = &genamux->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return (s8)clk_mux_ops.get_parent(mux_hw) > 0;
 }
@@ -136,7 +136,7 @@ u8 clkgena_divmux_get_parent(struct clk_hw *hw)
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *mux_hw = &genamux->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        genamux->muxsel = clk_mux_ops.get_parent(mux_hw);
        if ((s8)genamux->muxsel < 0) {
@@ -174,7 +174,7 @@ unsigned long clkgena_divmux_recalc_rate(struct clk_hw *hw,
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return clk_divider_ops.recalc_rate(div_hw, parent_rate);
 }
@@ -185,7 +185,7 @@ static int clkgena_divmux_set_rate(struct clk_hw *hw, unsigned long rate,
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return clk_divider_ops.set_rate(div_hw, rate, parent_rate);
 }
@@ -196,7 +196,7 @@ static long clkgena_divmux_round_rate(struct clk_hw *hw, unsigned long rate,
        struct clkgena_divmux *genamux = to_clkgena_divmux(hw);
        struct clk_hw *div_hw = &genamux->div[genamux->muxsel].hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return clk_divider_ops.round_rate(div_hw, rate, prate);
 }
index a66953c0f43094a4b96a816fb0d4ff37bfedf466..3a5292e3fcf8086f6418c8dceae6c59aab79b082 100644 (file)
@@ -8,6 +8,7 @@ obj-y += clk-a20-gmac.o
 obj-y += clk-mod0.o
 obj-y += clk-sun8i-mbus.o
 obj-y += clk-sun9i-core.o
+obj-y += clk-sun9i-mmc.o
 
 obj-$(CONFIG_MFD_SUN6I_PRCM) += \
        clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
index 62e08fb58554cbe8d4bf8f1c5a08f08bbd58ec68..8c20190a3e9f4e134824449c10a72e9e13b5b957 100644 (file)
@@ -80,6 +80,8 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
 }
 
 static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                      unsigned long min_rate,
+                                      unsigned long max_rate,
                                       unsigned long *best_parent_rate,
                                       struct clk_hw **best_parent_p)
 {
@@ -156,9 +158,10 @@ static const struct clk_ops clk_factors_ops = {
        .set_rate = clk_factors_set_rate,
 };
 
-struct clk * __init sunxi_factors_register(struct device_node *node,
-                                          const struct factors_data *data,
-                                          spinlock_t *lock)
+struct clk *sunxi_factors_register(struct device_node *node,
+                                  const struct factors_data *data,
+                                  spinlock_t *lock,
+                                  void __iomem *reg)
 {
        struct clk *clk;
        struct clk_factors *factors;
@@ -168,11 +171,8 @@ struct clk * __init sunxi_factors_register(struct device_node *node,
        struct clk_hw *mux_hw = NULL;
        const char *clk_name = node->name;
        const char *parents[FACTORS_MAX_PARENTS];
-       void __iomem *reg;
        int i = 0;
 
-       reg = of_iomap(node, 0);
-
        /* if we have a mux, we will have >1 parents */
        while (i < FACTORS_MAX_PARENTS &&
               (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
index 912238fde1324224863035da8a5836b5c276e8a0..171085ab5513e4f724017d05d6f91929262a4f78 100644 (file)
@@ -36,8 +36,9 @@ struct clk_factors {
        spinlock_t *lock;
 };
 
-struct clk * __init sunxi_factors_register(struct device_node *node,
-                                          const struct factors_data *data,
-                                          spinlock_t *lock);
+struct clk *sunxi_factors_register(struct device_node *node,
+                                  const struct factors_data *data,
+                                  spinlock_t *lock,
+                                  void __iomem *reg);
 
 #endif
index da0524eaee9406aff6c2d73f8b56b82c12360a16..ec8f5a1fca09f4240c433a1de2ea8207e6e369e5 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 #include <linux/of_address.h>
+#include <linux/platform_device.h>
 
 #include "clk-factors.h"
 
@@ -67,7 +68,7 @@ static struct clk_factors_config sun4i_a10_mod0_config = {
        .pwidth = 2,
 };
 
-static const struct factors_data sun4i_a10_mod0_data __initconst = {
+static const struct factors_data sun4i_a10_mod0_data = {
        .enable = 31,
        .mux = 24,
        .muxmask = BIT(1) | BIT(0),
@@ -79,15 +80,95 @@ static DEFINE_SPINLOCK(sun4i_a10_mod0_lock);
 
 static void __init sun4i_a10_mod0_setup(struct device_node *node)
 {
-       sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun4i_a10_mod0_lock);
+       void __iomem *reg;
+
+       reg = of_iomap(node, 0);
+       if (!reg) {
+               /*
+                * This happens with mod0 clk nodes instantiated through
+                * mfd, as those do not have their resources assigned at
+                * CLK_OF_DECLARE time yet, so do not print an error.
+                */
+               return;
+       }
+
+       sunxi_factors_register(node, &sun4i_a10_mod0_data,
+                              &sun4i_a10_mod0_lock, reg);
 }
 CLK_OF_DECLARE(sun4i_a10_mod0, "allwinner,sun4i-a10-mod0-clk", sun4i_a10_mod0_setup);
 
+static int sun4i_a10_mod0_clk_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct resource *r;
+       void __iomem *reg;
+
+       if (!np)
+               return -ENODEV;
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       reg = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(reg))
+               return PTR_ERR(reg);
+
+       sunxi_factors_register(np, &sun4i_a10_mod0_data,
+                              &sun4i_a10_mod0_lock, reg);
+       return 0;
+}
+
+static const struct of_device_id sun4i_a10_mod0_clk_dt_ids[] = {
+       { .compatible = "allwinner,sun4i-a10-mod0-clk" },
+       { /* sentinel */ }
+};
+
+static struct platform_driver sun4i_a10_mod0_clk_driver = {
+       .driver = {
+               .name = "sun4i-a10-mod0-clk",
+               .of_match_table = sun4i_a10_mod0_clk_dt_ids,
+       },
+       .probe = sun4i_a10_mod0_clk_probe,
+};
+module_platform_driver(sun4i_a10_mod0_clk_driver);
+
+static const struct factors_data sun9i_a80_mod0_data __initconst = {
+       .enable = 31,
+       .mux = 24,
+       .muxmask = BIT(3) | BIT(2) | BIT(1) | BIT(0),
+       .table = &sun4i_a10_mod0_config,
+       .getter = sun4i_a10_get_mod0_factors,
+};
+
+static void __init sun9i_a80_mod0_setup(struct device_node *node)
+{
+       void __iomem *reg;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (IS_ERR(reg)) {
+               pr_err("Could not get registers for mod0-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       sunxi_factors_register(node, &sun9i_a80_mod0_data,
+                              &sun4i_a10_mod0_lock, reg);
+}
+CLK_OF_DECLARE(sun9i_a80_mod0, "allwinner,sun9i-a80-mod0-clk", sun9i_a80_mod0_setup);
+
 static DEFINE_SPINLOCK(sun5i_a13_mbus_lock);
 
 static void __init sun5i_a13_mbus_setup(struct device_node *node)
 {
-       struct clk *mbus = sunxi_factors_register(node, &sun4i_a10_mod0_data, &sun5i_a13_mbus_lock);
+       struct clk *mbus;
+       void __iomem *reg;
+
+       reg = of_iomap(node, 0);
+       if (!reg) {
+               pr_err("Could not get registers for a13-mbus-clk\n");
+               return;
+       }
+
+       mbus = sunxi_factors_register(node, &sun4i_a10_mod0_data,
+                                     &sun5i_a13_mbus_lock, reg);
 
        /* The MBUS clock needs to be always enabled */
        __clk_get(mbus);
@@ -95,14 +176,10 @@ static void __init sun5i_a13_mbus_setup(struct device_node *node)
 }
 CLK_OF_DECLARE(sun5i_a13_mbus, "allwinner,sun5i-a13-mbus-clk", sun5i_a13_mbus_setup);
 
-struct mmc_phase_data {
-       u8      offset;
-};
-
 struct mmc_phase {
        struct clk_hw           hw;
+       u8                      offset;
        void __iomem            *reg;
-       struct mmc_phase_data   *data;
        spinlock_t              *lock;
 };
 
@@ -118,7 +195,7 @@ static int mmc_get_phase(struct clk_hw *hw)
        u8 delay;
 
        value = readl(phase->reg);
-       delay = (value >> phase->data->offset) & 0x3;
+       delay = (value >> phase->offset) & 0x3;
 
        if (!delay)
                return 180;
@@ -206,8 +283,8 @@ static int mmc_set_phase(struct clk_hw *hw, int degrees)
 
        spin_lock_irqsave(phase->lock, flags);
        value = readl(phase->reg);
-       value &= ~GENMASK(phase->data->offset + 3, phase->data->offset);
-       value |= delay << phase->data->offset;
+       value &= ~GENMASK(phase->offset + 3, phase->offset);
+       value |= delay << phase->offset;
        writel(value, phase->reg);
        spin_unlock_irqrestore(phase->lock, flags);
 
@@ -219,66 +296,97 @@ static const struct clk_ops mmc_clk_ops = {
        .set_phase      = mmc_set_phase,
 };
 
-static void __init sun4i_a10_mmc_phase_setup(struct device_node *node,
-                                            struct mmc_phase_data *data)
+/*
+ * sunxi_mmc_setup - Common setup function for mmc module clocks
+ *
+ * The only difference between module clocks on different platforms is the
+ * width of the mux register bits and the valid values, which are passed in
+ * through struct factors_data. The phase clock parts are identical.
+ */
+static void __init sunxi_mmc_setup(struct device_node *node,
+                                  const struct factors_data *data,
+                                  spinlock_t *lock)
 {
-       const char *parent_names[1] = { of_clk_get_parent_name(node, 0) };
-       struct clk_init_data init = {
-               .num_parents    = 1,
-               .parent_names   = parent_names,
-               .ops            = &mmc_clk_ops,
-       };
-
-       struct mmc_phase *phase;
-       struct clk *clk;
-
-       phase = kmalloc(sizeof(*phase), GFP_KERNEL);
-       if (!phase)
+       struct clk_onecell_data *clk_data;
+       const char *parent;
+       void __iomem *reg;
+       int i;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (IS_ERR(reg)) {
+               pr_err("Couldn't map the %s clock registers\n", node->name);
                return;
+       }
 
-       phase->hw.init = &init;
-
-       phase->reg = of_iomap(node, 0);
-       if (!phase->reg)
-               goto err_free;
-
-       phase->data = data;
-       phase->lock = &sun4i_a10_mod0_lock;
-
-       if (of_property_read_string(node, "clock-output-names", &init.name))
-               init.name = node->name;
+       clk_data = kmalloc(sizeof(*clk_data), GFP_KERNEL);
+       if (!clk_data)
+               return;
 
-       clk = clk_register(NULL, &phase->hw);
-       if (IS_ERR(clk))
-               goto err_unmap;
+       clk_data->clks = kcalloc(3, sizeof(*clk_data->clks), GFP_KERNEL);
+       if (!clk_data->clks)
+               goto err_free_data;
+
+       clk_data->clk_num = 3;
+       clk_data->clks[0] = sunxi_factors_register(node, data, lock, reg);
+       if (!clk_data->clks[0])
+               goto err_free_clks;
+
+       parent = __clk_get_name(clk_data->clks[0]);
+
+       for (i = 1; i < 3; i++) {
+               struct clk_init_data init = {
+                       .num_parents    = 1,
+                       .parent_names   = &parent,
+                       .ops            = &mmc_clk_ops,
+               };
+               struct mmc_phase *phase;
+
+               phase = kmalloc(sizeof(*phase), GFP_KERNEL);
+               if (!phase)
+                       continue;
+
+               phase->hw.init = &init;
+               phase->reg = reg;
+               phase->lock = lock;
+
+               if (i == 1)
+                       phase->offset = 8;
+               else
+                       phase->offset = 20;
+
+               if (of_property_read_string_index(node, "clock-output-names",
+                                                 i, &init.name))
+                       init.name = node->name;
+
+               clk_data->clks[i] = clk_register(NULL, &phase->hw);
+               if (IS_ERR(clk_data->clks[i])) {
+                       kfree(phase);
+                       continue;
+               }
+       }
 
-       of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       of_clk_add_provider(node, of_clk_src_onecell_get, clk_data);
 
        return;
 
-err_unmap:
-       iounmap(phase->reg);
-err_free:
-       kfree(phase);
+err_free_clks:
+       kfree(clk_data->clks);
+err_free_data:
+       kfree(clk_data);
 }
 
+static DEFINE_SPINLOCK(sun4i_a10_mmc_lock);
 
-static struct mmc_phase_data mmc_output_clk = {
-       .offset = 8,
-};
-
-static struct mmc_phase_data mmc_sample_clk = {
-       .offset = 20,
-};
-
-static void __init sun4i_a10_mmc_output_setup(struct device_node *node)
+static void __init sun4i_a10_mmc_setup(struct device_node *node)
 {
-       sun4i_a10_mmc_phase_setup(node, &mmc_output_clk);
+       sunxi_mmc_setup(node, &sun4i_a10_mod0_data, &sun4i_a10_mmc_lock);
 }
-CLK_OF_DECLARE(sun4i_a10_mmc_output, "allwinner,sun4i-a10-mmc-output-clk", sun4i_a10_mmc_output_setup);
+CLK_OF_DECLARE(sun4i_a10_mmc, "allwinner,sun4i-a10-mmc-clk", sun4i_a10_mmc_setup);
+
+static DEFINE_SPINLOCK(sun9i_a80_mmc_lock);
 
-static void __init sun4i_a10_mmc_sample_setup(struct device_node *node)
+static void __init sun9i_a80_mmc_setup(struct device_node *node)
 {
-       sun4i_a10_mmc_phase_setup(node, &mmc_sample_clk);
+       sunxi_mmc_setup(node, &sun9i_a80_mod0_data, &sun9i_a80_mmc_lock);
 }
-CLK_OF_DECLARE(sun4i_a10_mmc_sample, "allwinner,sun4i-a10-mmc-sample-clk", sun4i_a10_mmc_sample_setup);
+CLK_OF_DECLARE(sun9i_a80_mmc, "allwinner,sun9i-a80-mmc-clk", sun9i_a80_mmc_setup);
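For reference, the two phase child clocks registered above differ only in the bit offset of their field within the shared module-clock register: 8 for the output phase and 20 for the sample phase. A standalone sketch of the field update done in mmc_set_phase(); the register contents and delay value are made up.

    #include <stdio.h>
    #include <stdint.h>

    /* 32-bit version of the kernel's GENMASK(): bits h..l set */
    #define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

    int main(void)
    {
            uint32_t value = 0x00300100;   /* made-up register contents */
            unsigned int offset = 20;      /* sample-phase field; 8 for output phase */
            unsigned int delay = 2;

            value &= ~GENMASK(offset + 3, offset);   /* clear the old field */
            value |= delay << offset;                /* insert the new delay */

            printf("0x%08x\n", value);               /* -> 0x00200100 */
            return 0;
    }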
index 3d282fb8f85cc204d84c182aa4b16ae2181ea5e9..63cf149195ae1a40cb61a7b2256878c0f2ccd5ef 100644 (file)
@@ -45,6 +45,8 @@ static unsigned long ar100_recalc_rate(struct clk_hw *hw,
 }
 
 static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                unsigned long min_rate,
+                                unsigned long max_rate,
                                 unsigned long *best_parent_rate,
                                 struct clk_hw **best_parent_clk)
 {
index ef49786eefd3caa5b6f856287635a973213a14b3..14cd026064bf377ed7cbc05bbd84aa608e639fec 100644 (file)
@@ -69,8 +69,17 @@ static DEFINE_SPINLOCK(sun8i_a23_mbus_lock);
 
 static void __init sun8i_a23_mbus_setup(struct device_node *node)
 {
-       struct clk *mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data,
-                                                 &sun8i_a23_mbus_lock);
+       struct clk *mbus;
+       void __iomem *reg;
+
+       reg = of_iomap(node, 0);
+       if (!reg) {
+               pr_err("Could not get registers for a23-mbus-clk\n");
+               return;
+       }
+
+       mbus = sunxi_factors_register(node, &sun8i_a23_mbus_data,
+                                     &sun8i_a23_mbus_lock, reg);
 
        /* The MBUS clock needs to be always enabled */
        __clk_get(mbus);
index 3cb9036d91bb202fc057273302a6f2bcc31edf9f..d8da77d72861b29f0867d47b43b7917da31037f7 100644 (file)
 
 
 /**
- * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL1
+ * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL4
  * PLL4 rate is calculated as follows
  * rate = (parent_rate * n >> p) / (m + 1);
- * parent_rate is always 24Mhz
+ * parent_rate is always 24MHz
  *
  * p and m are named div1 and div2 in Allwinner's SDK
  */
 
 static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
-                                      u8 *n, u8 *k, u8 *m, u8 *p)
+                                      u8 *n_ret, u8 *k, u8 *m_ret, u8 *p_ret)
 {
-       int div;
+       int n;
+       int m = 1;
+       int p = 1;
 
-       /* Normalize value to a 6M multiple */
-       div = DIV_ROUND_UP(*freq, 6000000);
+       /* Normalize value to a 6 MHz multiple (24 MHz / 4) */
+       n = DIV_ROUND_UP(*freq, 6000000);
 
-       /* divs above 256 cannot be odd */
-       if (div > 256)
-               div = round_up(div, 2);
+       /* If n is too large switch to steps of 12 MHz */
+       if (n > 255) {
+               m = 0;
+               n = (n + 1) / 2;
+       }
+
+       /* If n is still too large switch to steps of 24 MHz */
+       if (n > 255) {
+               p = 0;
+               n = (n + 1) / 2;
+       }
 
-       /* divs above 512 must be a multiple of 4 */
-       if (div > 512)
-               div = round_up(div, 4);
+       /* n must be between 12 and 255 */
+       if (n > 255)
+               n = 255;
+       else if (n < 12)
+               n = 12;
 
-       *freq = 6000000 * div;
+       *freq = ((24000000 * n) >> p) / (m + 1);
 
        /* we were called to round the frequency, we can now return */
-       if (n == NULL)
+       if (n_ret == NULL)
                return;
 
-       /* p will be 1 for divs under 512 */
-       if (div < 512)
-               *p = 1;
-       else
-               *p = 0;
-
-       /* m will be 1 if div is odd */
-       if (div & 1)
-               *m = 1;
-       else
-               *m = 0;
-
-       /* calculate a suitable n based on m and p */
-       *n = div / (*p + 1) / (*m + 1);
+       *n_ret = n;
+       *m_ret = m;
+       *p_ret = p;
 }
 
 static struct clk_factors_config sun9i_a80_pll4_config = {
@@ -89,7 +90,17 @@ static DEFINE_SPINLOCK(sun9i_a80_pll4_lock);
 
 static void __init sun9i_a80_pll4_setup(struct device_node *node)
 {
-       sunxi_factors_register(node, &sun9i_a80_pll4_data, &sun9i_a80_pll4_lock);
+       void __iomem *reg;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (IS_ERR(reg)) {
+               pr_err("Could not get registers for a80-pll4-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       sunxi_factors_register(node, &sun9i_a80_pll4_data,
+                              &sun9i_a80_pll4_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_setup);
 
@@ -139,8 +150,18 @@ static DEFINE_SPINLOCK(sun9i_a80_gt_lock);
 
 static void __init sun9i_a80_gt_setup(struct device_node *node)
 {
-       struct clk *gt = sunxi_factors_register(node, &sun9i_a80_gt_data,
-                                               &sun9i_a80_gt_lock);
+       void __iomem *reg;
+       struct clk *gt;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (IS_ERR(reg)) {
+               pr_err("Could not get registers for a80-gt-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       gt = sunxi_factors_register(node, &sun9i_a80_gt_data,
+                                   &sun9i_a80_gt_lock, reg);
 
        /* The GT bus clock needs to be always enabled */
        __clk_get(gt);
@@ -194,7 +215,17 @@ static DEFINE_SPINLOCK(sun9i_a80_ahb_lock);
 
 static void __init sun9i_a80_ahb_setup(struct device_node *node)
 {
-       sunxi_factors_register(node, &sun9i_a80_ahb_data, &sun9i_a80_ahb_lock);
+       void __iomem *reg;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (IS_ERR(reg)) {
+               pr_err("Could not get registers for a80-ahb-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       sunxi_factors_register(node, &sun9i_a80_ahb_data,
+                              &sun9i_a80_ahb_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_ahb, "allwinner,sun9i-a80-ahb-clk", sun9i_a80_ahb_setup);
 
@@ -210,7 +241,17 @@ static DEFINE_SPINLOCK(sun9i_a80_apb0_lock);
 
 static void __init sun9i_a80_apb0_setup(struct device_node *node)
 {
-       sunxi_factors_register(node, &sun9i_a80_apb0_data, &sun9i_a80_apb0_lock);
+       void __iomem *reg;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (IS_ERR(reg)) {
+               pr_err("Could not get registers for a80-apb0-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       sunxi_factors_register(node, &sun9i_a80_apb0_data,
+                              &sun9i_a80_apb0_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_setup);
 
@@ -266,6 +307,16 @@ static DEFINE_SPINLOCK(sun9i_a80_apb1_lock);
 
 static void __init sun9i_a80_apb1_setup(struct device_node *node)
 {
-       sunxi_factors_register(node, &sun9i_a80_apb1_data, &sun9i_a80_apb1_lock);
+       void __iomem *reg;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+       if (IS_ERR(reg)) {
+               pr_err("Could not get registers for a80-apb1-clk: %s\n",
+                      node->name);
+               return;
+       }
+
+       sunxi_factors_register(node, &sun9i_a80_apb1_data,
+                              &sun9i_a80_apb1_lock, reg);
 }
 CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-clk", sun9i_a80_apb1_setup);
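Picking up the sun9i_a80_get_pll4_factors() rewrite above: the request is first quantized to 6 MHz steps, falls back to 12 MHz and then 24 MHz steps whenever n would exceed 255, and n is finally clamped to 12..255. A standalone walk-through of that selection; the requested 2.4 GHz is an arbitrary example.

    #include <stdio.h>

    int main(void)
    {
            unsigned long long freq = 2400000000ULL;  /* arbitrary request */
            int n, m = 1, p = 1;

            n = (freq + 5999999) / 6000000;   /* DIV_ROUND_UP(): 6 MHz steps */
            if (n > 255) {                    /* too big: switch to 12 MHz steps */
                    m = 0;
                    n = (n + 1) / 2;
            }
            if (n > 255) {                    /* still too big: 24 MHz steps */
                    p = 0;
                    n = (n + 1) / 2;
            }
            if (n > 255)                      /* n must stay within 12..255 */
                    n = 255;
            else if (n < 12)
                    n = 12;

            freq = ((24000000ULL * n) >> p) / (m + 1);
            printf("n=%d m=%d p=%d -> %llu Hz\n", n, m, p, freq);
            return 0;
    }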
diff --git a/drivers/clk/sunxi/clk-sun9i-mmc.c b/drivers/clk/sunxi/clk-sun9i-mmc.c
new file mode 100644 (file)
index 0000000..710c273
--- /dev/null
@@ -0,0 +1,219 @@
+/*
+ * Copyright 2015 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai        <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/platform_device.h>
+#include <linux/reset-controller.h>
+#include <linux/spinlock.h>
+
+#define SUN9I_MMC_WIDTH                4
+
+#define SUN9I_MMC_GATE_BIT     16
+#define SUN9I_MMC_RESET_BIT    18
+
+struct sun9i_mmc_clk_data {
+       spinlock_t                      lock;
+       void __iomem                    *membase;
+       struct clk                      *clk;
+       struct reset_control            *reset;
+       struct clk_onecell_data         clk_data;
+       struct reset_controller_dev     rcdev;
+};
+
+static int sun9i_mmc_reset_assert(struct reset_controller_dev *rcdev,
+                             unsigned long id)
+{
+       struct sun9i_mmc_clk_data *data = container_of(rcdev,
+                                                      struct sun9i_mmc_clk_data,
+                                                      rcdev);
+       unsigned long flags;
+       void __iomem *reg = data->membase + SUN9I_MMC_WIDTH * id;
+       u32 val;
+
+       clk_prepare_enable(data->clk);
+       spin_lock_irqsave(&data->lock, flags);
+
+       val = readl(reg);
+       writel(val & ~BIT(SUN9I_MMC_RESET_BIT), reg);
+
+       spin_unlock_irqrestore(&data->lock, flags);
+       clk_disable_unprepare(data->clk);
+
+       return 0;
+}
+
+static int sun9i_mmc_reset_deassert(struct reset_controller_dev *rcdev,
+                               unsigned long id)
+{
+       struct sun9i_mmc_clk_data *data = container_of(rcdev,
+                                                      struct sun9i_mmc_clk_data,
+                                                      rcdev);
+       unsigned long flags;
+       void __iomem *reg = data->membase + SUN9I_MMC_WIDTH * id;
+       u32 val;
+
+       clk_prepare_enable(data->clk);
+       spin_lock_irqsave(&data->lock, flags);
+
+       val = readl(reg);
+       writel(val | BIT(SUN9I_MMC_RESET_BIT), reg);
+
+       spin_unlock_irqrestore(&data->lock, flags);
+       clk_disable_unprepare(data->clk);
+
+       return 0;
+}
+
+static struct reset_control_ops sun9i_mmc_reset_ops = {
+       .assert         = sun9i_mmc_reset_assert,
+       .deassert       = sun9i_mmc_reset_deassert,
+};
+
+static int sun9i_a80_mmc_config_clk_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct sun9i_mmc_clk_data *data;
+       struct clk_onecell_data *clk_data;
+       const char *clk_name = np->name;
+       const char *clk_parent;
+       struct resource *r;
+       int count, i, ret;
+
+       data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
+
+       spin_lock_init(&data->lock);
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       /* one clock/reset pair per word */
+       count = DIV_ROUND_UP((r->end - r->start + 1), SUN9I_MMC_WIDTH);
+       data->membase = devm_ioremap_resource(&pdev->dev, r);
+       if (IS_ERR(data->membase))
+               return PTR_ERR(data->membase);
+
+       clk_data = &data->clk_data;
+       clk_data->clk_num = count;
+       clk_data->clks = devm_kcalloc(&pdev->dev, count, sizeof(struct clk *),
+                                     GFP_KERNEL);
+       if (!clk_data->clks)
+               return -ENOMEM;
+
+       data->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(data->clk)) {
+               dev_err(&pdev->dev, "Could not get clock\n");
+               return PTR_ERR(data->clk);
+       }
+
+       data->reset = devm_reset_control_get(&pdev->dev, NULL);
+       if (IS_ERR(data->reset)) {
+               dev_err(&pdev->dev, "Could not get reset control\n");
+               return PTR_ERR(data->reset);
+       }
+
+       ret = reset_control_deassert(data->reset);
+       if (ret) {
+               dev_err(&pdev->dev, "Reset deassert err %d\n", ret);
+               return ret;
+       }
+
+       clk_parent = __clk_get_name(data->clk);
+       for (i = 0; i < count; i++) {
+               of_property_read_string_index(np, "clock-output-names",
+                                             i, &clk_name);
+
+               clk_data->clks[i] = clk_register_gate(&pdev->dev, clk_name,
+                                                     clk_parent, 0,
+                                                     data->membase + SUN9I_MMC_WIDTH * i,
+                                                     SUN9I_MMC_GATE_BIT, 0,
+                                                     &data->lock);
+
+               if (IS_ERR(clk_data->clks[i])) {
+                       ret = PTR_ERR(clk_data->clks[i]);
+                       goto err_clk_register;
+               }
+       }
+
+       ret = of_clk_add_provider(np, of_clk_src_onecell_get, clk_data);
+       if (ret)
+               goto err_clk_provider;
+
+       data->rcdev.owner = THIS_MODULE;
+       data->rcdev.nr_resets = count;
+       data->rcdev.ops = &sun9i_mmc_reset_ops;
+       data->rcdev.of_node = pdev->dev.of_node;
+
+       ret = reset_controller_register(&data->rcdev);
+       if (ret)
+               goto err_rc_reg;
+
+       platform_set_drvdata(pdev, data);
+
+       return 0;
+
+err_rc_reg:
+       of_clk_del_provider(np);
+
+err_clk_provider:
+       for (i = 0; i < count; i++)
+               clk_unregister(clk_data->clks[i]);
+
+err_clk_register:
+       reset_control_assert(data->reset);
+
+       return ret;
+}
+
+static int sun9i_a80_mmc_config_clk_remove(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       struct sun9i_mmc_clk_data *data = platform_get_drvdata(pdev);
+       struct clk_onecell_data *clk_data = &data->clk_data;
+       int i;
+
+       reset_controller_unregister(&data->rcdev);
+       of_clk_del_provider(np);
+       for (i = 0; i < clk_data->clk_num; i++)
+               clk_unregister(clk_data->clks[i]);
+
+       reset_control_assert(data->reset);
+
+       return 0;
+}
+
+static const struct of_device_id sun9i_a80_mmc_config_clk_dt_ids[] = {
+       { .compatible = "allwinner,sun9i-a80-mmc-config-clk" },
+       { /* sentinel */ }
+};
+
+static struct platform_driver sun9i_a80_mmc_config_clk_driver = {
+       .driver = {
+               .name = "sun9i-a80-mmc-config-clk",
+               .of_match_table = sun9i_a80_mmc_config_clk_dt_ids,
+       },
+       .probe = sun9i_a80_mmc_config_clk_probe,
+       .remove = sun9i_a80_mmc_config_clk_remove,
+};
+module_platform_driver(sun9i_a80_mmc_config_clk_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner A80 MMC clock/reset Driver");
+MODULE_LICENSE("GPL v2");
index 1818f404538d377d22b8b1d1602a0130e388018a..379324eb5486e1b332d19fcf66fc8d7a0c4aff15 100644 (file)
 #include <linux/of_address.h>
 #include <linux/reset-controller.h>
 #include <linux/spinlock.h>
+#include <linux/log2.h>
 
 #include "clk-factors.h"
 
 static DEFINE_SPINLOCK(clk_lock);
 
+/**
+ * sun6i_a31_ahb1_clk_setup() - Setup function for a31 ahb1 composite clk
+ */
+
+#define SUN6I_AHB1_MAX_PARENTS         4
+#define SUN6I_AHB1_MUX_PARENT_PLL6     3
+#define SUN6I_AHB1_MUX_SHIFT           12
+/* un-shifted mask is what mux_clk expects */
+#define SUN6I_AHB1_MUX_MASK            0x3
+#define SUN6I_AHB1_MUX_GET_PARENT(reg) ((reg >> SUN6I_AHB1_MUX_SHIFT) & \
+                                        SUN6I_AHB1_MUX_MASK)
+
+#define SUN6I_AHB1_DIV_SHIFT           4
+#define SUN6I_AHB1_DIV_MASK            (0x3 << SUN6I_AHB1_DIV_SHIFT)
+#define SUN6I_AHB1_DIV_GET(reg)                ((reg & SUN6I_AHB1_DIV_MASK) >> \
+                                               SUN6I_AHB1_DIV_SHIFT)
+#define SUN6I_AHB1_DIV_SET(reg, div)   ((reg & ~SUN6I_AHB1_DIV_MASK) | \
+                                               (div << SUN6I_AHB1_DIV_SHIFT))
+#define SUN6I_AHB1_PLL6_DIV_SHIFT      6
+#define SUN6I_AHB1_PLL6_DIV_MASK       (0x3 << SUN6I_AHB1_PLL6_DIV_SHIFT)
+#define SUN6I_AHB1_PLL6_DIV_GET(reg)   ((reg & SUN6I_AHB1_PLL6_DIV_MASK) >> \
+                                               SUN6I_AHB1_PLL6_DIV_SHIFT)
+#define SUN6I_AHB1_PLL6_DIV_SET(reg, div) ((reg & ~SUN6I_AHB1_PLL6_DIV_MASK) | \
+                                               (div << SUN6I_AHB1_PLL6_DIV_SHIFT))
+
+struct sun6i_ahb1_clk {
+       struct clk_hw hw;
+       void __iomem *reg;
+};
+
+#define to_sun6i_ahb1_clk(_hw) container_of(_hw, struct sun6i_ahb1_clk, hw)
+
+static unsigned long sun6i_ahb1_clk_recalc_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
+       unsigned long rate;
+       u32 reg;
+
+       /* Fetch the register value */
+       reg = readl(ahb1->reg);
+
+       /* apply pre-divider first if parent is pll6 */
+       if (SUN6I_AHB1_MUX_GET_PARENT(reg) == SUN6I_AHB1_MUX_PARENT_PLL6)
+               parent_rate /= SUN6I_AHB1_PLL6_DIV_GET(reg) + 1;
+
+       /* clk divider */
+       rate = parent_rate >> SUN6I_AHB1_DIV_GET(reg);
+
+       return rate;
+}
+
+static long sun6i_ahb1_clk_round(unsigned long rate, u8 *divp, u8 *pre_divp,
+                                u8 parent, unsigned long parent_rate)
+{
+       u8 div, calcp, calcm = 1;
+
+       /*
+        * clock can only divide, so we will never be able to achieve
+        * frequencies higher than the parent frequency
+        */
+       if (parent_rate && rate > parent_rate)
+               rate = parent_rate;
+
+       div = DIV_ROUND_UP(parent_rate, rate);
+
+       /* calculate pre-divider if parent is pll6 */
+       if (parent == SUN6I_AHB1_MUX_PARENT_PLL6) {
+               if (div < 4)
+                       calcp = 0;
+               else if (div / 2 < 4)
+                       calcp = 1;
+               else if (div / 4 < 4)
+                       calcp = 2;
+               else
+                       calcp = 3;
+
+               calcm = DIV_ROUND_UP(div, 1 << calcp);
+       } else {
+               calcp = __roundup_pow_of_two(div);
+               calcp = calcp > 3 ? 3 : calcp;
+       }
+
+       /* we were asked to pass back divider values */
+       if (divp) {
+               *divp = calcp;
+               *pre_divp = calcm - 1;
+       }
+
+       return (parent_rate / calcm) >> calcp;
+}
+
+static long sun6i_ahb1_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                         unsigned long min_rate,
+                                         unsigned long max_rate,
+                                         unsigned long *best_parent_rate,
+                                         struct clk_hw **best_parent_clk)
+{
+       struct clk *clk = hw->clk, *parent, *best_parent = NULL;
+       int i, num_parents;
+       unsigned long parent_rate, best = 0, child_rate, best_child_rate = 0;
+
+       /* find the parent that can help provide the fastest rate <= rate */
+       num_parents = __clk_get_num_parents(clk);
+       for (i = 0; i < num_parents; i++) {
+               parent = clk_get_parent_by_index(clk, i);
+               if (!parent)
+                       continue;
+               if (__clk_get_flags(clk) & CLK_SET_RATE_PARENT)
+                       parent_rate = __clk_round_rate(parent, rate);
+               else
+                       parent_rate = __clk_get_rate(parent);
+
+               child_rate = sun6i_ahb1_clk_round(rate, NULL, NULL, i,
+                                                 parent_rate);
+
+               if (child_rate <= rate && child_rate > best_child_rate) {
+                       best_parent = parent;
+                       best = parent_rate;
+                       best_child_rate = child_rate;
+               }
+       }
+
+       if (best_parent)
+               *best_parent_clk = __clk_get_hw(best_parent);
+       *best_parent_rate = best;
+
+       return best_child_rate;
+}
+
+static int sun6i_ahb1_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+                                  unsigned long parent_rate)
+{
+       struct sun6i_ahb1_clk *ahb1 = to_sun6i_ahb1_clk(hw);
+       unsigned long flags;
+       u8 div, pre_div, parent;
+       u32 reg;
+
+       spin_lock_irqsave(&clk_lock, flags);
+
+       reg = readl(ahb1->reg);
+
+       /* need to know which parent is used to apply pre-divider */
+       parent = SUN6I_AHB1_MUX_GET_PARENT(reg);
+       sun6i_ahb1_clk_round(rate, &div, &pre_div, parent, parent_rate);
+
+       reg = SUN6I_AHB1_DIV_SET(reg, div);
+       reg = SUN6I_AHB1_PLL6_DIV_SET(reg, pre_div);
+       writel(reg, ahb1->reg);
+
+       spin_unlock_irqrestore(&clk_lock, flags);
+
+       return 0;
+}
+
+static const struct clk_ops sun6i_ahb1_clk_ops = {
+       .determine_rate = sun6i_ahb1_clk_determine_rate,
+       .recalc_rate    = sun6i_ahb1_clk_recalc_rate,
+       .set_rate       = sun6i_ahb1_clk_set_rate,
+};
+
+static void __init sun6i_ahb1_clk_setup(struct device_node *node)
+{
+       struct clk *clk;
+       struct sun6i_ahb1_clk *ahb1;
+       struct clk_mux *mux;
+       const char *clk_name = node->name;
+       const char *parents[SUN6I_AHB1_MAX_PARENTS];
+       void __iomem *reg;
+       int i = 0;
+
+       reg = of_io_request_and_map(node, 0, of_node_full_name(node));
+
+       /* we have a mux, we will have >1 parents */
+       while (i < SUN6I_AHB1_MAX_PARENTS &&
+              (parents[i] = of_clk_get_parent_name(node, i)) != NULL)
+               i++;
+
+       of_property_read_string(node, "clock-output-names", &clk_name);
+
+       ahb1 = kzalloc(sizeof(struct sun6i_ahb1_clk), GFP_KERNEL);
+       if (!ahb1)
+               return;
+
+       mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
+       if (!mux) {
+               kfree(ahb1);
+               return;
+       }
+
+       /* set up clock properties */
+       mux->reg = reg;
+       mux->shift = SUN6I_AHB1_MUX_SHIFT;
+       mux->mask = SUN6I_AHB1_MUX_MASK;
+       mux->lock = &clk_lock;
+       ahb1->reg = reg;
+
+       clk = clk_register_composite(NULL, clk_name, parents, i,
+                                    &mux->hw, &clk_mux_ops,
+                                    &ahb1->hw, &sun6i_ahb1_clk_ops,
+                                    NULL, NULL, 0);
+
+       if (!IS_ERR(clk)) {
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+               clk_register_clkdev(clk, clk_name, NULL);
+       }
+}
+CLK_OF_DECLARE(sun6i_a31_ahb1, "allwinner,sun6i-a31-ahb1-clk", sun6i_ahb1_clk_setup);
+
 /* Maximum number of parents our clocks have */
 #define SUNXI_MAX_PARENTS      5
 
@@ -354,43 +564,6 @@ static void sun7i_a20_get_out_factors(u32 *freq, u32 parent_rate,
        *p = calcp;
 }
 
-/**
- * clk_sunxi_mmc_phase_control() - configures MMC clock phase control
- */
-
-void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output)
-{
-       #define to_clk_composite(_hw) container_of(_hw, struct clk_composite, hw)
-       #define to_clk_factors(_hw) container_of(_hw, struct clk_factors, hw)
-
-       struct clk_hw *hw = __clk_get_hw(clk);
-       struct clk_composite *composite = to_clk_composite(hw);
-       struct clk_hw *rate_hw = composite->rate_hw;
-       struct clk_factors *factors = to_clk_factors(rate_hw);
-       unsigned long flags = 0;
-       u32 reg;
-
-       if (factors->lock)
-               spin_lock_irqsave(factors->lock, flags);
-
-       reg = readl(factors->reg);
-
-       /* set sample clock phase control */
-       reg &= ~(0x7 << 20);
-       reg |= ((sample & 0x7) << 20);
-
-       /* set output clock phase control */
-       reg &= ~(0x7 << 8);
-       reg |= ((output & 0x7) << 8);
-
-       writel(reg, factors->reg);
-
-       if (factors->lock)
-               spin_unlock_irqrestore(factors->lock, flags);
-}
-EXPORT_SYMBOL(clk_sunxi_mmc_phase_control);
-
-
 /**
  * sunxi_factors_clk_setup() - Setup function for factor clocks
  */
@@ -413,6 +586,7 @@ static struct clk_factors_config sun6i_a31_pll1_config = {
        .kwidth = 2,
        .mshift = 0,
        .mwidth = 2,
+       .n_start = 1,
 };
 
 static struct clk_factors_config sun8i_a23_pll1_config = {
@@ -520,7 +694,16 @@ static const struct factors_data sun7i_a20_out_data __initconst = {
 static struct clk * __init sunxi_factors_clk_setup(struct device_node *node,
                                                   const struct factors_data *data)
 {
-       return sunxi_factors_register(node, data, &clk_lock);
+       void __iomem *reg;
+
+       reg = of_iomap(node, 0);
+       if (!reg) {
+               pr_err("Could not get registers for factors-clk: %s\n",
+                      node->name);
+               return NULL;
+       }
+
+       return sunxi_factors_register(node, data, &clk_lock, reg);
 }
 
 
@@ -561,7 +744,7 @@ static void __init sunxi_mux_clk_setup(struct device_node *node,
        of_property_read_string(node, "clock-output-names", &clk_name);
 
        clk = clk_register_mux(NULL, clk_name, parents, i,
-                              CLK_SET_RATE_NO_REPARENT, reg,
+                              CLK_SET_RATE_PARENT, reg,
                               data->shift, SUNXI_MUX_GATE_WIDTH,
                               0, &clk_lock);
 
@@ -1217,7 +1400,6 @@ CLK_OF_DECLARE(sun7i_a20_clk_init, "allwinner,sun7i-a20", sun5i_init_clocks);
 
 static const char *sun6i_critical_clocks[] __initdata = {
        "cpu",
-       "ahb1_sdram",
 };
 
 static void __init sun6i_init_clocks(struct device_node *node)
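Relating this back to the AHB1 clock added earlier in this file: for the PLL6 parent, sun6i_ahb1_clk_round() splits the requested divisor into a pre-divider (1..4) and a power-of-two shift (0..3). A standalone sketch of that decomposition, assuming a 600 MHz PLL6 and a 200 MHz target purely for illustration:

    #include <stdio.h>

    int main(void)
    {
            unsigned long parent_rate = 600000000;   /* e.g. PLL6 output */
            unsigned long rate = 200000000;
            unsigned int div, calcp, calcm;

            div = (parent_rate + rate - 1) / rate;   /* DIV_ROUND_UP() */

            if (div < 4)                             /* pick the smallest shift */
                    calcp = 0;
            else if (div / 2 < 4)
                    calcp = 1;
            else if (div / 4 < 4)
                    calcp = 2;
            else
                    calcp = 3;

            /* DIV_ROUND_UP(div, 1 << calcp): the pre-divider covers the rest */
            calcm = (div + (1u << calcp) - 1) >> calcp;

            printf("div=%u -> pre-div=%u shift=%u rate=%lu\n",
                   div, calcm, calcp, (parent_rate / calcm) >> calcp);
            return 0;
    }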
index f7dfb72884a4e2d177261984023d7c20f9249a22..edb8358fa6cebab596e7f39c022736a4b6c31457 100644 (file)
@@ -15,3 +15,4 @@ obj-$(CONFIG_ARCH_TEGRA_2x_SOC)         += clk-tegra20.o
 obj-$(CONFIG_ARCH_TEGRA_3x_SOC)         += clk-tegra30.o
 obj-$(CONFIG_ARCH_TEGRA_114_SOC)       += clk-tegra114.o
 obj-$(CONFIG_ARCH_TEGRA_124_SOC)       += clk-tegra124.o
+obj-$(CONFIG_ARCH_TEGRA_132_SOC)       += clk-tegra124.o
index 0011d547a9f7ed1e5767625582c1b1ab59a34c9e..60738cc954cb3dd857763d7aaa99ea955d939e5e 100644 (file)
@@ -64,10 +64,8 @@ enum clk_id {
        tegra_clk_disp2,
        tegra_clk_dp2,
        tegra_clk_dpaux,
-       tegra_clk_dsia,
        tegra_clk_dsialp,
        tegra_clk_dsia_mux,
-       tegra_clk_dsib,
        tegra_clk_dsiblp,
        tegra_clk_dsib_mux,
        tegra_clk_dtv,
index 9e899c18af8678c8eac70fab0ede9d8631fff3f3..d84ae49d0e05eead08c6379a0236c339ff88cb5c 100644 (file)
@@ -28,7 +28,7 @@ static u8 clk_periph_get_parent(struct clk_hw *hw)
        const struct clk_ops *mux_ops = periph->mux_ops;
        struct clk_hw *mux_hw = &periph->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return mux_ops->get_parent(mux_hw);
 }
@@ -39,7 +39,7 @@ static int clk_periph_set_parent(struct clk_hw *hw, u8 index)
        const struct clk_ops *mux_ops = periph->mux_ops;
        struct clk_hw *mux_hw = &periph->mux.hw;
 
-       mux_hw->clk = hw->clk;
+       __clk_hw_set_clk(mux_hw, hw);
 
        return mux_ops->set_parent(mux_hw, index);
 }
@@ -51,7 +51,7 @@ static unsigned long clk_periph_recalc_rate(struct clk_hw *hw,
        const struct clk_ops *div_ops = periph->div_ops;
        struct clk_hw *div_hw = &periph->divider.hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return div_ops->recalc_rate(div_hw, parent_rate);
 }
@@ -63,7 +63,7 @@ static long clk_periph_round_rate(struct clk_hw *hw, unsigned long rate,
        const struct clk_ops *div_ops = periph->div_ops;
        struct clk_hw *div_hw = &periph->divider.hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return div_ops->round_rate(div_hw, rate, prate);
 }
@@ -75,7 +75,7 @@ static int clk_periph_set_rate(struct clk_hw *hw, unsigned long rate,
        const struct clk_ops *div_ops = periph->div_ops;
        struct clk_hw *div_hw = &periph->divider.hw;
 
-       div_hw->clk = hw->clk;
+       __clk_hw_set_clk(div_hw, hw);
 
        return div_ops->set_rate(div_hw, rate, parent_rate);
 }
@@ -86,7 +86,7 @@ static int clk_periph_is_enabled(struct clk_hw *hw)
        const struct clk_ops *gate_ops = periph->gate_ops;
        struct clk_hw *gate_hw = &periph->gate.hw;
 
-       gate_hw->clk = hw->clk;
+       __clk_hw_set_clk(gate_hw, hw);
 
        return gate_ops->is_enabled(gate_hw);
 }
@@ -97,7 +97,7 @@ static int clk_periph_enable(struct clk_hw *hw)
        const struct clk_ops *gate_ops = periph->gate_ops;
        struct clk_hw *gate_hw = &periph->gate.hw;
 
-       gate_hw->clk = hw->clk;
+       __clk_hw_set_clk(gate_hw, hw);
 
        return gate_ops->enable(gate_hw);
 }
index c7c6d8fb32fbb14bfc0727024bdd91ccab6009b8..bfef9abdf23250d39504587f7f41d3ca6ec6e06a 100644 (file)
@@ -816,7 +816,9 @@ const struct clk_ops tegra_clk_plle_ops = {
        .enable = clk_plle_enable,
 };
 
-#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC)
+#if defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+       defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+       defined(CONFIG_ARCH_TEGRA_132_SOC)
 
 static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
                           unsigned long parent_rate)
@@ -1505,7 +1507,9 @@ struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
        return clk;
 }
 
-#if defined(CONFIG_ARCH_TEGRA_114_SOC) || defined(CONFIG_ARCH_TEGRA_124_SOC)
+#if defined(CONFIG_ARCH_TEGRA_114_SOC) || \
+       defined(CONFIG_ARCH_TEGRA_124_SOC) || \
+       defined(CONFIG_ARCH_TEGRA_132_SOC)
 static const struct clk_ops tegra_clk_pllxc_ops = {
        .is_enabled = clk_pll_is_enabled,
        .enable = clk_pll_iddq_enable,
@@ -1565,7 +1569,7 @@ struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
        parent = __clk_lookup(parent_name);
        if (!parent) {
                WARN(1, "parent clk %s of %s must be registered first\n",
-                       name, parent_name);
+                       parent_name, name);
                return ERR_PTR(-EINVAL);
        }
 
@@ -1665,7 +1669,7 @@ struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
        parent = __clk_lookup(parent_name);
        if (!parent) {
                WARN(1, "parent clk %s of %s must be registered first\n",
-                       name, parent_name);
+                       parent_name, name);
                return ERR_PTR(-EINVAL);
        }
 
@@ -1706,7 +1710,7 @@ struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
        parent = __clk_lookup(parent_name);
        if (!parent) {
                WARN(1, "parent clk %s of %s must be registered first\n",
-                       name, parent_name);
+                       parent_name, name);
                return ERR_PTR(-EINVAL);
        }
 
@@ -1802,7 +1806,7 @@ struct clk *tegra_clk_register_plle_tegra114(const char *name,
 }
 #endif
 
-#ifdef CONFIG_ARCH_TEGRA_124_SOC
+#if defined(CONFIG_ARCH_TEGRA_124_SOC) || defined(CONFIG_ARCH_TEGRA_132_SOC)
 static const struct clk_ops tegra_clk_pllss_ops = {
        .is_enabled = clk_pll_is_enabled,
        .enable = clk_pll_iddq_enable,
@@ -1830,7 +1834,7 @@ struct clk *tegra_clk_register_pllss(const char *name, const char *parent_name,
        parent = __clk_lookup(parent_name);
        if (!parent) {
                WARN(1, "parent clk %s of %s must be registered first\n",
-                       name, parent_name);
+                       parent_name, name);
                return ERR_PTR(-EINVAL);
        }
 
index 37f32c49674eb293cb242b34b446ce73e227de37..cef0727b9eec98b91de3db3c2373fe8e508e5750 100644 (file)
@@ -434,10 +434,10 @@ static struct tegra_periph_init_data periph_clks[] = {
        MUX("hda", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA, 125, TEGRA_PERIPH_ON_APB, tegra_clk_hda),
        MUX("hda2codec_2x", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_HDA2CODEC_2X, 111, TEGRA_PERIPH_ON_APB, tegra_clk_hda2codec_2x),
        MUX("vfir", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_VFIR, 7, TEGRA_PERIPH_ON_APB, tegra_clk_vfir),
-       MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1),
-       MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2),
-       MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3),
-       MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4),
+       MUX("sdmmc1", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1),
+       MUX("sdmmc2", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2),
+       MUX("sdmmc3", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3),
+       MUX("sdmmc4", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4),
        MUX("la", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_LA, 76, TEGRA_PERIPH_ON_APB, tegra_clk_la),
        MUX("trace", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_TRACE, 77, TEGRA_PERIPH_ON_APB, tegra_clk_trace),
        MUX("owr", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_OWR, 71, TEGRA_PERIPH_ON_APB, tegra_clk_owr),
@@ -470,10 +470,10 @@ static struct tegra_periph_init_data periph_clks[] = {
        MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
        MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
        MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 165, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
-       MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8),
-       MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8),
-       MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8),
-       MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8),
+       MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc1_8),
+       MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc2_8),
+       MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc3_8),
+       MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, TEGRA_PERIPH_ON_APB, tegra_clk_sdmmc4_8),
        MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
        MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
        MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
@@ -537,8 +537,6 @@ static struct tegra_periph_init_data gate_clks[] = {
        GATE("xusb_host", "xusb_host_src", 89, 0, tegra_clk_xusb_host, 0),
        GATE("xusb_ss", "xusb_ss_src", 156, 0, tegra_clk_xusb_ss, 0),
        GATE("xusb_dev", "xusb_dev_src", 95, 0, tegra_clk_xusb_dev, 0),
-       GATE("dsia", "dsia_mux", 48, 0, tegra_clk_dsia, 0),
-       GATE("dsib", "dsib_mux", 82, 0, tegra_clk_dsib, 0),
        GATE("emc", "emc_mux", 57, 0, tegra_clk_emc, CLK_IGNORE_UNUSED),
        GATE("sata_cold", "clk_m", 129, TEGRA_PERIPH_ON_APB, tegra_clk_sata_cold, 0),
        GATE("ispb", "clk_m", 3, 0, tegra_clk_ispb, 0),
index 0b03d2cf7264f7d42dafb84fd83bb29e18f420a1..d0766423a5d607554ff8f9e9f9b7f610f5d875a3 100644 (file)
@@ -715,7 +715,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
        [tegra_clk_sbc2_8] = { .dt_id = TEGRA114_CLK_SBC2, .present = true },
        [tegra_clk_sbc3_8] = { .dt_id = TEGRA114_CLK_SBC3, .present = true },
        [tegra_clk_i2c5] = { .dt_id = TEGRA114_CLK_I2C5, .present = true },
-       [tegra_clk_dsia] = { .dt_id = TEGRA114_CLK_DSIA, .present = true },
        [tegra_clk_mipi] = { .dt_id = TEGRA114_CLK_MIPI, .present = true },
        [tegra_clk_hdmi] = { .dt_id = TEGRA114_CLK_HDMI, .present = true },
        [tegra_clk_csi] = { .dt_id = TEGRA114_CLK_CSI, .present = true },
@@ -739,7 +738,6 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
        [tegra_clk_dtv] = { .dt_id = TEGRA114_CLK_DTV, .present = true },
        [tegra_clk_ndspeed] = { .dt_id = TEGRA114_CLK_NDSPEED, .present = true },
        [tegra_clk_i2cslow] = { .dt_id = TEGRA114_CLK_I2CSLOW, .present = true },
-       [tegra_clk_dsib] = { .dt_id = TEGRA114_CLK_DSIB, .present = true },
        [tegra_clk_tsec] = { .dt_id = TEGRA114_CLK_TSEC, .present = true },
        [tegra_clk_xusb_host] = { .dt_id = TEGRA114_CLK_XUSB_HOST, .present = true },
        [tegra_clk_msenc] = { .dt_id = TEGRA114_CLK_MSENC, .present = true },
@@ -1224,6 +1222,14 @@ static __init void tegra114_periph_clk_init(void __iomem *clk_base,
                               clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
        clks[TEGRA114_CLK_DSIB_MUX] = clk;
 
+       clk = tegra_clk_register_periph_gate("dsia", "dsia_mux", 0, clk_base,
+                                            0, 48, periph_clk_enb_refcnt);
+       clks[TEGRA114_CLK_DSIA] = clk;
+
+       clk = tegra_clk_register_periph_gate("dsib", "dsib_mux", 0, clk_base,
+                                            0, 82, periph_clk_enb_refcnt);
+       clks[TEGRA114_CLK_DSIB] = clk;
+
        /* emc mux */
        clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
                               ARRAY_SIZE(mux_pllmcp_clkm),
index f5f9baca7bb621924d14ff4d9364d18580e0fbc2..9a893f2fe8e9889e7e1df95d12ba90d687763942 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2012, 2013, NVIDIA CORPORATION.  All rights reserved.
+ * Copyright (c) 2012-2014 NVIDIA CORPORATION.  All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
 #include "clk.h"
 #include "clk-id.h"
 
+/*
+ * TEGRA124_CAR_BANK_COUNT: the number of peripheral clock register
+ * banks present in the Tegra124/132 CAR IP block.  The banks are
+ * identified by single letters, e.g.: L, H, U, V, W, X.  See
+ * periph_regs[] in drivers/clk/tegra/clk.c
+ */
+#define TEGRA124_CAR_BANK_COUNT                        6
+
 #define CLK_SOURCE_CSITE 0x1d4
 #define CLK_SOURCE_EMC 0x19c
 
@@ -128,7 +136,6 @@ static unsigned long osc_freq;
 static unsigned long pll_ref_freq;
 
 static DEFINE_SPINLOCK(pll_d_lock);
-static DEFINE_SPINLOCK(pll_d2_lock);
 static DEFINE_SPINLOCK(pll_e_lock);
 static DEFINE_SPINLOCK(pll_re_lock);
 static DEFINE_SPINLOCK(pll_u_lock);
@@ -145,11 +152,6 @@ static unsigned long tegra124_input_freq[] = {
        [12] = 260000000,
 };
 
-static const char *mux_plld_out0_plld2_out0[] = {
-       "pll_d_out0", "pll_d2_out0",
-};
-#define mux_plld_out0_plld2_out0_idx NULL
-
 static const char *mux_pllmcp_clkm[] = {
        "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3",
 };
@@ -783,7 +785,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_sbc2] = { .dt_id = TEGRA124_CLK_SBC2, .present = true },
        [tegra_clk_sbc3] = { .dt_id = TEGRA124_CLK_SBC3, .present = true },
        [tegra_clk_i2c5] = { .dt_id = TEGRA124_CLK_I2C5, .present = true },
-       [tegra_clk_dsia] = { .dt_id = TEGRA124_CLK_DSIA, .present = true },
        [tegra_clk_mipi] = { .dt_id = TEGRA124_CLK_MIPI, .present = true },
        [tegra_clk_hdmi] = { .dt_id = TEGRA124_CLK_HDMI, .present = true },
        [tegra_clk_csi] = { .dt_id = TEGRA124_CLK_CSI, .present = true },
@@ -809,7 +810,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_soc_therm] = { .dt_id = TEGRA124_CLK_SOC_THERM, .present = true },
        [tegra_clk_dtv] = { .dt_id = TEGRA124_CLK_DTV, .present = true },
        [tegra_clk_i2cslow] = { .dt_id = TEGRA124_CLK_I2CSLOW, .present = true },
-       [tegra_clk_dsib] = { .dt_id = TEGRA124_CLK_DSIB, .present = true },
        [tegra_clk_tsec] = { .dt_id = TEGRA124_CLK_TSEC, .present = true },
        [tegra_clk_xusb_host] = { .dt_id = TEGRA124_CLK_XUSB_HOST, .present = true },
        [tegra_clk_msenc] = { .dt_id = TEGRA124_CLK_MSENC, .present = true },
@@ -949,8 +949,6 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
        [tegra_clk_clk_out_1_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_1_MUX, .present = true },
        [tegra_clk_clk_out_2_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_2_MUX, .present = true },
        [tegra_clk_clk_out_3_mux] = { .dt_id = TEGRA124_CLK_CLK_OUT_3_MUX, .present = true },
-       [tegra_clk_dsia_mux] = { .dt_id = TEGRA124_CLK_DSIA_MUX, .present = true },
-       [tegra_clk_dsib_mux] = { .dt_id = TEGRA124_CLK_DSIB_MUX, .present = true },
 };
 
 static struct tegra_devclk devclks[] __initdata = {
@@ -1112,17 +1110,17 @@ static __init void tegra124_periph_clk_init(void __iomem *clk_base,
                                        1, 2);
        clks[TEGRA124_CLK_XUSB_SS_DIV2] = clk;
 
-       /* dsia mux */
-       clk = clk_register_mux(NULL, "dsia_mux", mux_plld_out0_plld2_out0,
-                              ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
-                              clk_base + PLLD_BASE, 25, 1, 0, &pll_d_lock);
-       clks[TEGRA124_CLK_DSIA_MUX] = clk;
+       clk = clk_register_gate(NULL, "plld_dsi", "plld_out0", 0,
+                               clk_base + PLLD_MISC, 30, 0, &pll_d_lock);
+       clks[TEGRA124_CLK_PLLD_DSI] = clk;
+
+       clk = tegra_clk_register_periph_gate("dsia", "plld_dsi", 0, clk_base,
+                                            0, 48, periph_clk_enb_refcnt);
+       clks[TEGRA124_CLK_DSIA] = clk;
 
-       /* dsib mux */
-       clk = clk_register_mux(NULL, "dsib_mux", mux_plld_out0_plld2_out0,
-                              ARRAY_SIZE(mux_plld_out0_plld2_out0), 0,
-                              clk_base + PLLD2_BASE, 25, 1, 0, &pll_d2_lock);
-       clks[TEGRA124_CLK_DSIB_MUX] = clk;
+       clk = tegra_clk_register_periph_gate("dsib", "plld_dsi", 0, clk_base,
+                                            0, 82, periph_clk_enb_refcnt);
+       clks[TEGRA124_CLK_DSIB] = clk;
 
        /* emc mux */
        clk = clk_register_mux(NULL, "emc_mux", mux_pllmcp_clkm,
@@ -1351,7 +1349,7 @@ static const struct of_device_id pmc_match[] __initconst = {
        {},
 };
 
-static struct tegra_clk_init_table init_table[] __initdata = {
+static struct tegra_clk_init_table common_init_table[] __initdata = {
        {TEGRA124_CLK_UARTA, TEGRA124_CLK_PLL_P, 408000000, 0},
        {TEGRA124_CLK_UARTB, TEGRA124_CLK_PLL_P, 408000000, 0},
        {TEGRA124_CLK_UARTC, TEGRA124_CLK_PLL_P, 408000000, 0},
@@ -1368,6 +1366,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        {TEGRA124_CLK_I2S4, TEGRA124_CLK_PLL_A_OUT0, 11289600, 0},
        {TEGRA124_CLK_VDE, TEGRA124_CLK_PLL_P, 0, 0},
        {TEGRA124_CLK_HOST1X, TEGRA124_CLK_PLL_P, 136000000, 1},
+       {TEGRA124_CLK_DSIALP, TEGRA124_CLK_PLL_P, 68000000, 0},
+       {TEGRA124_CLK_DSIBLP, TEGRA124_CLK_PLL_P, 68000000, 0},
        {TEGRA124_CLK_SCLK, TEGRA124_CLK_PLL_P_OUT2, 102000000, 1},
        {TEGRA124_CLK_DFLL_SOC, TEGRA124_CLK_PLL_P, 51000000, 1},
        {TEGRA124_CLK_DFLL_REF, TEGRA124_CLK_PLL_P, 51000000, 1},
@@ -1385,27 +1385,73 @@ static struct tegra_clk_init_table init_table[] __initdata = {
        {TEGRA124_CLK_SATA, TEGRA124_CLK_PLL_P, 104000000, 0},
        {TEGRA124_CLK_SATA_OOB, TEGRA124_CLK_PLL_P, 204000000, 0},
        {TEGRA124_CLK_EMC, TEGRA124_CLK_CLK_MAX, 0, 1},
-       {TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1},
        {TEGRA124_CLK_MSELECT, TEGRA124_CLK_CLK_MAX, 0, 1},
        {TEGRA124_CLK_CSITE, TEGRA124_CLK_CLK_MAX, 0, 1},
        {TEGRA124_CLK_TSENSOR, TEGRA124_CLK_CLK_M, 400000, 0},
+       /* This MUST be the last entry. */
+       {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
+};
+
+static struct tegra_clk_init_table tegra124_init_table[] __initdata = {
        {TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 0},
+       {TEGRA124_CLK_CCLK_G, TEGRA124_CLK_CLK_MAX, 0, 1},
+       /* This MUST be the last entry. */
+       {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
+};
+
+/* Tegra132 requires the SOC_THERM clock to remain active */
+static struct tegra_clk_init_table tegra132_init_table[] __initdata = {
+       {TEGRA124_CLK_SOC_THERM, TEGRA124_CLK_PLL_P, 51000000, 1},
        /* This MUST be the last entry. */
        {TEGRA124_CLK_CLK_MAX, TEGRA124_CLK_CLK_MAX, 0, 0},
 };
 
+/**
+ * tegra124_clock_apply_init_table - initialize clocks on Tegra124 SoCs
+ *
+ * Program an initial clock rate and enable or disable clocks needed
+ * by the rest of the kernel, for Tegra124 SoCs.  It is intended to be
+ * called by assigning a pointer to it to tegra_clk_apply_init_table -
+ * this will be called as an arch_initcall.  No return value.
+ */
 static void __init tegra124_clock_apply_init_table(void)
 {
-       tegra_init_from_table(init_table, clks, TEGRA124_CLK_CLK_MAX);
+       tegra_init_from_table(common_init_table, clks, TEGRA124_CLK_CLK_MAX);
+       tegra_init_from_table(tegra124_init_table, clks, TEGRA124_CLK_CLK_MAX);
 }
 
-static void __init tegra124_clock_init(struct device_node *np)
+/**
+ * tegra132_clock_apply_init_table - initialize clocks on Tegra132 SoCs
+ *
+ * Program an initial clock rate and enable or disable clocks needed
+ * by the rest of the kernel, for Tegra132 SoCs.  It is intended to be
+ * called by assigning a pointer to it to tegra_clk_apply_init_table -
+ * this will be called as an arch_initcall.  No return value.
+ */
+static void __init tegra132_clock_apply_init_table(void)
+{
+       tegra_init_from_table(common_init_table, clks, TEGRA124_CLK_CLK_MAX);
+       tegra_init_from_table(tegra132_init_table, clks, TEGRA124_CLK_CLK_MAX);
+}
+
+/**
+ * tegra124_132_clock_init_pre - clock initialization preamble for T124/T132
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most of the clocks controlled by the CAR IP block, along
+ * with a few clocks controlled by the PMC IP block.  Everything in
+ * this function should be common to Tegra124 and Tegra132.  XXX The
+ * PMC clock initialization should probably be moved to PMC-specific
+ * driver code.  No return value.
+ */
+static void __init tegra124_132_clock_init_pre(struct device_node *np)
 {
        struct device_node *node;
+       u32 plld_base;
 
        clk_base = of_iomap(np, 0);
        if (!clk_base) {
-               pr_err("ioremap tegra124 CAR failed\n");
+               pr_err("ioremap tegra124/tegra132 CAR failed\n");
                return;
        }
 
@@ -1423,7 +1469,8 @@ static void __init tegra124_clock_init(struct device_node *np)
                return;
        }
 
-       clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX, 6);
+       clks = tegra_clk_init(clk_base, TEGRA124_CLK_CLK_MAX,
+                             TEGRA124_CAR_BANK_COUNT);
        if (!clks)
                return;
 
@@ -1437,13 +1484,76 @@ static void __init tegra124_clock_init(struct device_node *np)
        tegra_audio_clk_init(clk_base, pmc_base, tegra124_clks, &pll_a_params);
        tegra_pmc_clk_init(pmc_base, tegra124_clks);
 
+       /* For Tegra124 & Tegra132, PLLD is the only source for DSIA & DSIB */
+       plld_base = clk_readl(clk_base + PLLD_BASE);
+       plld_base &= ~BIT(25);
+       clk_writel(plld_base, clk_base + PLLD_BASE);
+}
+
+/**
+ * tegra124_132_clock_init_post - clock initialization postamble for T124/T132
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register the remaining clocks, along with a few clocks controlled by
+ * the PMC IP block.  Everything in this function should be common to Tegra124
+ * and Tegra132.  This function must be called after
+ * tegra124_132_clock_init_pre(), otherwise clk_base and pmc_base will
+ * not be set.  No return value.
+ */
+static void __init tegra124_132_clock_init_post(struct device_node *np)
+{
        tegra_super_clk_gen4_init(clk_base, pmc_base, tegra124_clks,
-                                       &pll_x_params);
+                                 &pll_x_params);
        tegra_add_of_provider(np);
        tegra_register_devclks(devclks, ARRAY_SIZE(devclks));
 
+       tegra_cpu_car_ops = &tegra124_cpu_car_ops;
+}
+
+/**
+ * tegra124_clock_init - Tegra124-specific clock initialization
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most SoC clocks for the Tegra124 system-on-chip.  Most of
+ * this code is shared between the Tegra124 and Tegra132 SoCs,
+ * although some of the initial clock settings and CPU clocks differ.
+ * Intended to be called by the OF init code when a DT node with the
+ * "nvidia,tegra124-car" string is encountered, and declared with
+ * CLK_OF_DECLARE.  No return value.
+ */
+static void __init tegra124_clock_init(struct device_node *np)
+{
+       tegra124_132_clock_init_pre(np);
        tegra_clk_apply_init_table = tegra124_clock_apply_init_table;
+       tegra124_132_clock_init_post(np);
+}
 
-       tegra_cpu_car_ops = &tegra124_cpu_car_ops;
+/**
+ * tegra132_clock_init - Tegra132-specific clock initialization
+ * @np: struct device_node * of the DT node for the SoC CAR IP block
+ *
+ * Register most SoC clocks for the Tegra132 system-on-chip.  Most of
+ * this code is shared between the Tegra124 and Tegra132 SoCs,
+ * although some of the initial clock settings and CPU clocks differ.
+ * Intended to be called by the OF init code when a DT node with the
+ * "nvidia,tegra132-car" string is encountered, and declared with
+ * CLK_OF_DECLARE.  No return value.
+ */
+static void __init tegra132_clock_init(struct device_node *np)
+{
+       tegra124_132_clock_init_pre(np);
+
+       /*
+        * On Tegra132, these clocks are controlled by the
+        * CLUSTER_clocks IP block, located in the CPU complex
+        */
+       tegra124_clks[tegra_clk_cclk_g].present = false;
+       tegra124_clks[tegra_clk_cclk_lp].present = false;
+       tegra124_clks[tegra_clk_pll_x].present = false;
+       tegra124_clks[tegra_clk_pll_x_out0].present = false;
+
+       tegra_clk_apply_init_table = tegra132_clock_apply_init_table;
+       tegra124_132_clock_init_post(np);
 }
 CLK_OF_DECLARE(tegra124, "nvidia,tegra124-car", tegra124_clock_init);
+CLK_OF_DECLARE(tegra132, "nvidia,tegra132-car", tegra132_clock_init);
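The kernel-doc above describes tegra124_clock_init()/tegra132_clock_init() as entry points that the OF init code calls when it encounters a CAR node with the matching compatible string, wired up through CLK_OF_DECLARE. Below is a minimal, hypothetical provider showing the same wiring; "vendor,example-car", example_clock_init and example_clk_data are made-up names used only for illustration.

	#include <linux/clk-provider.h>
	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/of.h>
	#include <linux/of_address.h>

	static struct clk *example_clks[1];

	static struct clk_onecell_data example_clk_data = {
		.clks = example_clks,
		.clk_num = ARRAY_SIZE(example_clks),
	};

	/* Called by the OF clock init code for every node whose compatible
	 * string matches "vendor,example-car", just as the tegra124/tegra132
	 * declarations above are matched against their CAR nodes. */
	static void __init example_clock_init(struct device_node *np)
	{
		void __iomem *base = of_iomap(np, 0);

		if (!base)
			return;

		/* ...register the SoC clocks into example_clks[] here... */

		of_clk_add_provider(np, of_clk_src_onecell_get, &example_clk_data);
	}
	CLK_OF_DECLARE(example_car, "vendor,example-car", example_clock_init);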
index 97dc8595c3cd5c1b59113deaa16bbd23a5298cfc..9ddb7547cb431b4b234d5ec6a41fba457abc2678 100644 (file)
@@ -302,10 +302,13 @@ struct clk ** __init tegra_lookup_dt_id(int clk_id,
 
 tegra_clk_apply_init_table_func tegra_clk_apply_init_table;
 
-void __init tegra_clocks_apply_init_table(void)
+static int __init tegra_clocks_apply_init_table(void)
 {
        if (!tegra_clk_apply_init_table)
-               return;
+               return 0;
 
        tegra_clk_apply_init_table();
+
+       return 0;
 }
+arch_initcall(tegra_clocks_apply_init_table);
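The clk.c hunk above turns tegra_clocks_apply_init_table() into an arch_initcall that runs the SoC-specific hook only if one of the CLK_OF_DECLARE init functions assigned it earlier. Below is a minimal sketch of that deferred function-pointer pattern; every example_* name is hypothetical.

	#include <linux/init.h>

	/* Hook mirroring tegra_clk_apply_init_table: the early clock init
	 * code points this at an SoC-specific routine; it stays NULL
	 * otherwise. */
	static void (*example_apply_init_table)(void);

	static int __init example_clocks_apply_init_table(void)
	{
		if (!example_apply_init_table)
			return 0;	/* no SoC hook registered, nothing to do */

		example_apply_init_table();
		return 0;		/* initcalls must return int */
	}
	arch_initcall(example_clocks_apply_init_table);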
index ed4d0aaf891639585825344d7460bbfcbc1c81e2..105ffd0f5e79da04e496b291e4eb3968b834ba64 100644 (file)
@@ -1,13 +1,17 @@
-ifneq ($(CONFIG_OF),)
 obj-y                                  += clk.o autoidle.o clockdomain.o
 clk-common                             = dpll.o composite.o divider.o gate.o \
                                          fixed-factor.o mux.o apll.o
 obj-$(CONFIG_SOC_AM33XX)               += $(clk-common) clk-33xx.o
+obj-$(CONFIG_SOC_TI81XX)               += $(clk-common) fapll.o clk-816x.o
 obj-$(CONFIG_ARCH_OMAP2)               += $(clk-common) interface.o clk-2xxx.o
-obj-$(CONFIG_ARCH_OMAP3)               += $(clk-common) interface.o clk-3xxx.o
+obj-$(CONFIG_ARCH_OMAP3)               += $(clk-common) interface.o \
+                                          clk-3xxx.o
 obj-$(CONFIG_ARCH_OMAP4)               += $(clk-common) clk-44xx.o
 obj-$(CONFIG_SOC_OMAP5)                        += $(clk-common) clk-54xx.o
 obj-$(CONFIG_SOC_DRA7XX)               += $(clk-common) clk-7xx.o \
                                           clk-dra7-atl.o
 obj-$(CONFIG_SOC_AM43XX)               += $(clk-common) clk-43xx.o
+
+ifdef CONFIG_ATAGS
+obj-$(CONFIG_ARCH_OMAP3)                += clk-3xxx-legacy.o
 endif
diff --git a/drivers/clk/ti/clk-3xxx-legacy.c b/drivers/clk/ti/clk-3xxx-legacy.c
new file mode 100644 (file)
index 0000000..e0732a4
--- /dev/null
@@ -0,0 +1,4653 @@
+/*
+ * OMAP3 Legacy clock data
+ *
+ * Copyright (C) 2014 Texas Instruments, Inc
+ *     Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+#include "clock.h"
+
+static struct ti_clk_fixed virt_12m_ck_data = {
+       .frequency = 12000000,
+};
+
+static struct ti_clk virt_12m_ck = {
+       .name = "virt_12m_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_12m_ck_data,
+};
+
+static struct ti_clk_fixed virt_13m_ck_data = {
+       .frequency = 13000000,
+};
+
+static struct ti_clk virt_13m_ck = {
+       .name = "virt_13m_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_13m_ck_data,
+};
+
+static struct ti_clk_fixed virt_19200000_ck_data = {
+       .frequency = 19200000,
+};
+
+static struct ti_clk virt_19200000_ck = {
+       .name = "virt_19200000_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_19200000_ck_data,
+};
+
+static struct ti_clk_fixed virt_26000000_ck_data = {
+       .frequency = 26000000,
+};
+
+static struct ti_clk virt_26000000_ck = {
+       .name = "virt_26000000_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_26000000_ck_data,
+};
+
+static struct ti_clk_fixed virt_38_4m_ck_data = {
+       .frequency = 38400000,
+};
+
+static struct ti_clk virt_38_4m_ck = {
+       .name = "virt_38_4m_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_38_4m_ck_data,
+};
+
+static struct ti_clk_fixed virt_16_8m_ck_data = {
+       .frequency = 16800000,
+};
+
+static struct ti_clk virt_16_8m_ck = {
+       .name = "virt_16_8m_ck",
+       .type = TI_CLK_FIXED,
+       .data = &virt_16_8m_ck_data,
+};
+
+static const char *osc_sys_ck_parents[] = {
+       "virt_12m_ck",
+       "virt_13m_ck",
+       "virt_19200000_ck",
+       "virt_26000000_ck",
+       "virt_38_4m_ck",
+       "virt_16_8m_ck",
+};
+
+static struct ti_clk_mux osc_sys_ck_data = {
+       .num_parents = ARRAY_SIZE(osc_sys_ck_parents),
+       .reg = 0xd40,
+       .module = TI_CLKM_PRM,
+       .parents = osc_sys_ck_parents,
+};
+
+static struct ti_clk osc_sys_ck = {
+       .name = "osc_sys_ck",
+       .type = TI_CLK_MUX,
+       .data = &osc_sys_ck_data,
+};
+
+static struct ti_clk_divider sys_ck_data = {
+       .parent = "osc_sys_ck",
+       .bit_shift = 6,
+       .max_div = 3,
+       .reg = 0x1270,
+       .module = TI_CLKM_PRM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk sys_ck = {
+       .name = "sys_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &sys_ck_data,
+};
+
+static const char *dpll3_ck_parents[] = {
+       "sys_ck",
+       "sys_ck",
+};
+
+static struct ti_clk_dpll dpll3_ck_data = {
+       .num_parents = ARRAY_SIZE(dpll3_ck_parents),
+       .control_reg = 0xd00,
+       .idlest_reg = 0xd20,
+       .mult_div1_reg = 0xd40,
+       .autoidle_reg = 0xd30,
+       .module = TI_CLKM_CM,
+       .parents = dpll3_ck_parents,
+       .flags = CLKF_CORE,
+       .freqsel_mask = 0xf0,
+       .div1_mask = 0x7f00,
+       .idlest_mask = 0x1,
+       .auto_recal_bit = 0x3,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x5,
+       .max_multiplier = 0x7ff,
+       .enable_mask = 0x7,
+       .mult_mask = 0x7ff0000,
+       .recal_st_bit = 0x5,
+       .autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll3_ck = {
+       .name = "dpll3_ck",
+       .clkdm_name = "dpll3_clkdm",
+       .type = TI_CLK_DPLL,
+       .data = &dpll3_ck_data,
+};
+
+static struct ti_clk_divider dpll3_m2_ck_data = {
+       .parent = "dpll3_ck",
+       .bit_shift = 27,
+       .max_div = 31,
+       .reg = 0xd40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll3_m2_ck = {
+       .name = "dpll3_m2_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll3_m2_ck_data,
+};
+
+static struct ti_clk_fixed_factor core_ck_data = {
+       .parent = "dpll3_m2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_ck = {
+       .name = "core_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_ck_data,
+};
+
+static struct ti_clk_divider l3_ick_data = {
+       .parent = "core_ck",
+       .max_div = 3,
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk l3_ick = {
+       .name = "l3_ick",
+       .type = TI_CLK_DIVIDER,
+       .data = &l3_ick_data,
+};
+
+static struct ti_clk_fixed_factor security_l3_ick_data = {
+       .parent = "l3_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk security_l3_ick = {
+       .name = "security_l3_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &security_l3_ick_data,
+};
+
+static struct ti_clk_fixed_factor wkup_l4_ick_data = {
+       .parent = "sys_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk wkup_l4_ick = {
+       .name = "wkup_l4_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &wkup_l4_ick_data,
+};
+
+static struct ti_clk_gate usim_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 9,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk usim_ick = {
+       .name = "usim_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usim_ick_data,
+};
+
+static struct ti_clk_gate dss2_alwon_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 1,
+       .reg = 0xe00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk dss2_alwon_fck = {
+       .name = "dss2_alwon_fck",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss2_alwon_fck_data,
+};
+
+static struct ti_clk_divider l4_ick_data = {
+       .parent = "l3_ick",
+       .bit_shift = 2,
+       .max_div = 3,
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk l4_ick = {
+       .name = "l4_ick",
+       .type = TI_CLK_DIVIDER,
+       .data = &l4_ick_data,
+};
+
+static struct ti_clk_fixed_factor core_l4_ick_data = {
+       .parent = "l4_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_l4_ick = {
+       .name = "core_l4_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_l4_ick_data,
+};
+
+static struct ti_clk_gate mmchs2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 25,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mmchs2_ick = {
+       .name = "mmchs2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs2_ick_data,
+};
+
+static const char *dpll4_ck_parents[] = {
+       "sys_ck",
+       "sys_ck",
+};
+
+static struct ti_clk_dpll dpll4_ck_data = {
+       .num_parents = ARRAY_SIZE(dpll4_ck_parents),
+       .control_reg = 0xd00,
+       .idlest_reg = 0xd20,
+       .mult_div1_reg = 0xd44,
+       .autoidle_reg = 0xd30,
+       .module = TI_CLKM_CM,
+       .parents = dpll4_ck_parents,
+       .flags = CLKF_PER,
+       .freqsel_mask = 0xf00000,
+       .modes = 0x82,
+       .div1_mask = 0x7f,
+       .idlest_mask = 0x2,
+       .auto_recal_bit = 0x13,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x6,
+       .max_multiplier = 0x7ff,
+       .enable_mask = 0x70000,
+       .mult_mask = 0x7ff00,
+       .recal_st_bit = 0x6,
+       .autoidle_mask = 0x38,
+};
+
+static struct ti_clk dpll4_ck = {
+       .name = "dpll4_ck",
+       .clkdm_name = "dpll4_clkdm",
+       .type = TI_CLK_DPLL,
+       .data = &dpll4_ck_data,
+};
+
+static struct ti_clk_divider dpll4_m2_ck_data = {
+       .parent = "dpll4_ck",
+       .max_div = 63,
+       .reg = 0xd48,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m2_ck = {
+       .name = "dpll4_m2_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll4_m2_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m2x2_mul_ck_data = {
+       .parent = "dpll4_m2_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll4_m2x2_mul_ck = {
+       .name = "dpll4_m2x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_m2x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m2x2_ck_data = {
+       .parent = "dpll4_m2x2_mul_ck",
+       .bit_shift = 0x1b,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m2x2_ck = {
+       .name = "dpll4_m2x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m2x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_alwon_fck_data = {
+       .parent = "dpll4_m2x2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk omap_96m_alwon_fck = {
+       .name = "omap_96m_alwon_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_96m_alwon_fck_data,
+};
+
+static struct ti_clk_fixed_factor cm_96m_fck_data = {
+       .parent = "omap_96m_alwon_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk cm_96m_fck = {
+       .name = "cm_96m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &cm_96m_fck_data,
+};
+
+static const char *omap_96m_fck_parents[] = {
+       "cm_96m_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux omap_96m_fck_data = {
+       .bit_shift = 6,
+       .num_parents = ARRAY_SIZE(omap_96m_fck_parents),
+       .reg = 0xd40,
+       .module = TI_CLKM_CM,
+       .parents = omap_96m_fck_parents,
+};
+
+static struct ti_clk omap_96m_fck = {
+       .name = "omap_96m_fck",
+       .type = TI_CLK_MUX,
+       .data = &omap_96m_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_96m_fck_data = {
+       .parent = "omap_96m_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_96m_fck = {
+       .name = "core_96m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_96m_fck_data,
+};
+
+static struct ti_clk_gate mspro_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 23,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mspro_fck = {
+       .name = "mspro_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mspro_fck_data,
+};
+
+static struct ti_clk_gate dss_ick_3430es2_data = {
+       .parent = "l4_ick",
+       .bit_shift = 0,
+       .reg = 0xe10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_DSS | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk dss_ick_3430es2 = {
+       .name = "dss_ick",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss_ick_3430es2_data,
+};
+
+static struct ti_clk_gate uart4_ick_am35xx_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 23,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart4_ick_am35xx = {
+       .name = "uart4_ick_am35xx",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart4_ick_am35xx_data,
+};
+
+static struct ti_clk_fixed_factor security_l4_ick2_data = {
+       .parent = "l4_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk security_l4_ick2 = {
+       .name = "security_l4_ick2",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &security_l4_ick2_data,
+};
+
+static struct ti_clk_gate aes1_ick_data = {
+       .parent = "security_l4_ick2",
+       .bit_shift = 3,
+       .reg = 0xa14,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk aes1_ick = {
+       .name = "aes1_ick",
+       .type = TI_CLK_GATE,
+       .data = &aes1_ick_data,
+};
+
+static const char *dpll5_ck_parents[] = {
+       "sys_ck",
+       "sys_ck",
+};
+
+static struct ti_clk_dpll dpll5_ck_data = {
+       .num_parents = ARRAY_SIZE(dpll5_ck_parents),
+       .control_reg = 0xd04,
+       .idlest_reg = 0xd24,
+       .mult_div1_reg = 0xd4c,
+       .autoidle_reg = 0xd34,
+       .module = TI_CLKM_CM,
+       .parents = dpll5_ck_parents,
+       .freqsel_mask = 0xf0,
+       .modes = 0x82,
+       .div1_mask = 0x7f,
+       .idlest_mask = 0x1,
+       .auto_recal_bit = 0x3,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x19,
+       .max_multiplier = 0x7ff,
+       .enable_mask = 0x7,
+       .mult_mask = 0x7ff00,
+       .recal_st_bit = 0x19,
+       .autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll5_ck = {
+       .name = "dpll5_ck",
+       .clkdm_name = "dpll5_clkdm",
+       .type = TI_CLK_DPLL,
+       .data = &dpll5_ck_data,
+};
+
+static struct ti_clk_divider dpll5_m2_ck_data = {
+       .parent = "dpll5_ck",
+       .max_div = 31,
+       .reg = 0xd50,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll5_m2_ck = {
+       .name = "dpll5_m2_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll5_m2_ck_data,
+};
+
+static struct ti_clk_gate usbhost_120m_fck_data = {
+       .parent = "dpll5_m2_ck",
+       .bit_shift = 1,
+       .reg = 0x1400,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk usbhost_120m_fck = {
+       .name = "usbhost_120m_fck",
+       .clkdm_name = "usbhost_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usbhost_120m_fck_data,
+};
+
+static struct ti_clk_fixed_factor cm_96m_d2_fck_data = {
+       .parent = "cm_96m_fck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk cm_96m_d2_fck = {
+       .name = "cm_96m_d2_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &cm_96m_d2_fck_data,
+};
+
+static struct ti_clk_fixed sys_altclk_data = {
+       .frequency = 0x0,
+};
+
+static struct ti_clk sys_altclk = {
+       .name = "sys_altclk",
+       .type = TI_CLK_FIXED,
+       .data = &sys_altclk_data,
+};
+
+static const char *omap_48m_fck_parents[] = {
+       "cm_96m_d2_fck",
+       "sys_altclk",
+};
+
+static struct ti_clk_mux omap_48m_fck_data = {
+       .bit_shift = 3,
+       .num_parents = ARRAY_SIZE(omap_48m_fck_parents),
+       .reg = 0xd40,
+       .module = TI_CLKM_CM,
+       .parents = omap_48m_fck_parents,
+};
+
+static struct ti_clk omap_48m_fck = {
+       .name = "omap_48m_fck",
+       .type = TI_CLK_MUX,
+       .data = &omap_48m_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_48m_fck_data = {
+       .parent = "omap_48m_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_48m_fck = {
+       .name = "core_48m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_48m_fck_data,
+};
+
+static struct ti_clk_fixed mcbsp_clks_data = {
+       .frequency = 0x0,
+};
+
+static struct ti_clk mcbsp_clks = {
+       .name = "mcbsp_clks",
+       .type = TI_CLK_FIXED,
+       .data = &mcbsp_clks_data,
+};
+
+static struct ti_clk_gate mcbsp2_gate_fck_data = {
+       .parent = "mcbsp_clks",
+       .bit_shift = 0,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed_factor per_96m_fck_data = {
+       .parent = "omap_96m_alwon_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk per_96m_fck = {
+       .name = "per_96m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &per_96m_fck_data,
+};
+
+static const char *mcbsp2_mux_fck_parents[] = {
+       "per_96m_fck",
+       "mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp2_mux_fck_data = {
+       .bit_shift = 6,
+       .num_parents = ARRAY_SIZE(mcbsp2_mux_fck_parents),
+       .reg = 0x274,
+       .module = TI_CLKM_SCRM,
+       .parents = mcbsp2_mux_fck_parents,
+};
+
+static struct ti_clk_composite mcbsp2_fck_data = {
+       .mux = &mcbsp2_mux_fck_data,
+       .gate = &mcbsp2_gate_fck_data,
+};
+
+static struct ti_clk mcbsp2_fck = {
+       .name = "mcbsp2_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &mcbsp2_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll3_m2x2_ck_data = {
+       .parent = "dpll3_m2_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll3_m2x2_ck = {
+       .name = "dpll3_m2x2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll3_m2x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor corex2_fck_data = {
+       .parent = "dpll3_m2x2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk corex2_fck = {
+       .name = "corex2_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &corex2_fck_data,
+};
+
+static struct ti_clk_gate ssi_ssr_gate_fck_3430es1_data = {
+       .parent = "corex2_fck",
+       .bit_shift = 0,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_NO_WAIT,
+};
+
+static int ssi_ssr_div_fck_3430es1_divs[] = {
+       0,
+       1,
+       2,
+       3,
+       4,
+       0,
+       6,
+       0,
+       8,
+};
+
+static struct ti_clk_divider ssi_ssr_div_fck_3430es1_data = {
+       .num_dividers = ARRAY_SIZE(ssi_ssr_div_fck_3430es1_divs),
+       .parent = "corex2_fck",
+       .bit_shift = 8,
+       .dividers = ssi_ssr_div_fck_3430es1_divs,
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite ssi_ssr_fck_3430es1_data = {
+       .gate = &ssi_ssr_gate_fck_3430es1_data,
+       .divider = &ssi_ssr_div_fck_3430es1_data,
+};
+
+static struct ti_clk ssi_ssr_fck_3430es1 = {
+       .name = "ssi_ssr_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &ssi_ssr_fck_3430es1_data,
+};
+
+static struct ti_clk_fixed_factor ssi_sst_fck_3430es1_data = {
+       .parent = "ssi_ssr_fck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk ssi_sst_fck_3430es1 = {
+       .name = "ssi_sst_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &ssi_sst_fck_3430es1_data,
+};
+
+static struct ti_clk_fixed omap_32k_fck_data = {
+       .frequency = 32768,
+};
+
+static struct ti_clk omap_32k_fck = {
+       .name = "omap_32k_fck",
+       .type = TI_CLK_FIXED,
+       .data = &omap_32k_fck_data,
+};
+
+static struct ti_clk_fixed_factor per_32k_alwon_fck_data = {
+       .parent = "omap_32k_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk per_32k_alwon_fck = {
+       .name = "per_32k_alwon_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &per_32k_alwon_fck_data,
+};
+
+static struct ti_clk_gate gpio5_dbck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 16,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio5_dbck = {
+       .name = "gpio5_dbck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio5_dbck_data,
+};
+
+static struct ti_clk_gate gpt1_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 0,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt1_ick = {
+       .name = "gpt1_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt1_ick_data,
+};
+
+static struct ti_clk_gate mcspi3_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 20,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi3_fck = {
+       .name = "mcspi3_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi3_fck_data,
+};
+
+static struct ti_clk_gate gpt2_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 3,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static const char *gpt2_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt2_mux_fck_data = {
+       .num_parents = ARRAY_SIZE(gpt2_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt2_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt2_fck_data = {
+       .mux = &gpt2_mux_fck_data,
+       .gate = &gpt2_gate_fck_data,
+};
+
+static struct ti_clk gpt2_fck = {
+       .name = "gpt2_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt2_fck_data,
+};
+
+static struct ti_clk_gate gpt10_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 11,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt10_ick = {
+       .name = "gpt10_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt10_ick_data,
+};
+
+static struct ti_clk_gate uart2_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 14,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart2_fck = {
+       .name = "uart2_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart2_fck_data,
+};
+
+static struct ti_clk_fixed_factor sr_l4_ick_data = {
+       .parent = "l4_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk sr_l4_ick = {
+       .name = "sr_l4_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &sr_l4_ick_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d8_fck_data = {
+       .parent = "omap_96m_fck",
+       .div = 8,
+       .mult = 1,
+};
+
+static struct ti_clk omap_96m_d8_fck = {
+       .name = "omap_96m_d8_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_96m_d8_fck_data,
+};
+
+static struct ti_clk_divider dpll4_m5_ck_data = {
+       .parent = "dpll4_ck",
+       .max_div = 63,
+       .reg = 0xf40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m5_ck = {
+       .name = "dpll4_m5_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll4_m5_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m5x2_mul_ck_data = {
+       .parent = "dpll4_m5_ck",
+       .div = 1,
+       .mult = 2,
+       .flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dpll4_m5x2_mul_ck = {
+       .name = "dpll4_m5x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_m5x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m5x2_ck_data = {
+       .parent = "dpll4_m5x2_mul_ck",
+       .bit_shift = 0x1e,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m5x2_ck = {
+       .name = "dpll4_m5x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m5x2_ck_data,
+};
+
+static struct ti_clk_gate cam_mclk_data = {
+       .parent = "dpll4_m5x2_ck",
+       .bit_shift = 0,
+       .reg = 0xf00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk cam_mclk = {
+       .name = "cam_mclk",
+       .type = TI_CLK_GATE,
+       .data = &cam_mclk_data,
+};
+
+static struct ti_clk_gate mcbsp3_gate_fck_data = {
+       .parent = "mcbsp_clks",
+       .bit_shift = 1,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static const char *mcbsp3_mux_fck_parents[] = {
+       "per_96m_fck",
+       "mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp3_mux_fck_data = {
+       .num_parents = ARRAY_SIZE(mcbsp3_mux_fck_parents),
+       .reg = 0x2d8,
+       .module = TI_CLKM_SCRM,
+       .parents = mcbsp3_mux_fck_parents,
+};
+
+static struct ti_clk_composite mcbsp3_fck_data = {
+       .mux = &mcbsp3_mux_fck_data,
+       .gate = &mcbsp3_gate_fck_data,
+};
+
+static struct ti_clk mcbsp3_fck = {
+       .name = "mcbsp3_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &mcbsp3_fck_data,
+};
+
+static struct ti_clk_gate csi2_96m_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 1,
+       .reg = 0xf00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk csi2_96m_fck = {
+       .name = "csi2_96m_fck",
+       .clkdm_name = "cam_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &csi2_96m_fck_data,
+};
+
+static struct ti_clk_gate gpt9_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 10,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static const char *gpt9_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt9_mux_fck_data = {
+       .bit_shift = 7,
+       .num_parents = ARRAY_SIZE(gpt9_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt9_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt9_fck_data = {
+       .mux = &gpt9_mux_fck_data,
+       .gate = &gpt9_gate_fck_data,
+};
+
+static struct ti_clk gpt9_fck = {
+       .name = "gpt9_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt9_fck_data,
+};
+
+static struct ti_clk_divider dpll3_m3_ck_data = {
+       .parent = "dpll3_ck",
+       .bit_shift = 16,
+       .max_div = 31,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll3_m3_ck = {
+       .name = "dpll3_m3_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll3_m3_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll3_m3x2_mul_ck_data = {
+       .parent = "dpll3_m3_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll3_m3x2_mul_ck = {
+       .name = "dpll3_m3x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll3_m3x2_mul_ck_data,
+};
+
+static struct ti_clk_gate sr2_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 7,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk sr2_fck = {
+       .name = "sr2_fck",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sr2_fck_data,
+};
+
+static struct ti_clk_fixed pclk_ck_data = {
+       .frequency = 27000000,
+};
+
+static struct ti_clk pclk_ck = {
+       .name = "pclk_ck",
+       .type = TI_CLK_FIXED,
+       .data = &pclk_ck_data,
+};
+
+static struct ti_clk_gate wdt2_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 5,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk wdt2_ick = {
+       .name = "wdt2_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &wdt2_ick_data,
+};
+
+static struct ti_clk_fixed_factor core_l3_ick_data = {
+       .parent = "l3_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_l3_ick = {
+       .name = "core_l3_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_l3_ick_data,
+};
+
+static struct ti_clk_gate mcspi4_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 21,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi4_fck = {
+       .name = "mcspi4_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi4_fck_data,
+};
+
+static struct ti_clk_fixed_factor per_48m_fck_data = {
+       .parent = "omap_48m_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk per_48m_fck = {
+       .name = "per_48m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &per_48m_fck_data,
+};
+
+static struct ti_clk_gate uart4_fck_data = {
+       .parent = "per_48m_fck",
+       .bit_shift = 18,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart4_fck = {
+       .name = "uart4_fck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart4_fck_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d10_fck_data = {
+       .parent = "omap_96m_fck",
+       .div = 10,
+       .mult = 1,
+};
+
+static struct ti_clk omap_96m_d10_fck = {
+       .name = "omap_96m_d10_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_96m_d10_fck_data,
+};
+
+static struct ti_clk_gate usim_gate_fck_data = {
+       .parent = "omap_96m_fck",
+       .bit_shift = 9,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed_factor per_l4_ick_data = {
+       .parent = "l4_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk per_l4_ick = {
+       .name = "per_l4_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &per_l4_ick_data,
+};
+
+static struct ti_clk_gate gpt5_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 6,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt5_ick = {
+       .name = "gpt5_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt5_ick_data,
+};
+
+static struct ti_clk_gate mcspi2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 19,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi2_ick = {
+       .name = "mcspi2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi2_ick_data,
+};
+
+static struct ti_clk_fixed_factor ssi_l4_ick_data = {
+       .parent = "l4_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk ssi_l4_ick = {
+       .name = "ssi_l4_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &ssi_l4_ick_data,
+};
+
+static struct ti_clk_gate ssi_ick_3430es1_data = {
+       .parent = "ssi_l4_ick",
+       .bit_shift = 0,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk ssi_ick_3430es1 = {
+       .name = "ssi_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &ssi_ick_3430es1_data,
+};
+
+static struct ti_clk_gate i2c2_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 16,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk i2c2_fck = {
+       .name = "i2c2_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c2_fck_data,
+};
+
+static struct ti_clk_divider dpll1_fck_data = {
+       .parent = "core_ck",
+       .bit_shift = 19,
+       .max_div = 7,
+       .reg = 0x940,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll1_fck = {
+       .name = "dpll1_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll1_fck_data,
+};
+
+static const char *dpll1_ck_parents[] = {
+       "sys_ck",
+       "dpll1_fck",
+};
+
+static struct ti_clk_dpll dpll1_ck_data = {
+       .num_parents = ARRAY_SIZE(dpll1_ck_parents),
+       .control_reg = 0x904,
+       .idlest_reg = 0x924,
+       .mult_div1_reg = 0x940,
+       .autoidle_reg = 0x934,
+       .module = TI_CLKM_CM,
+       .parents = dpll1_ck_parents,
+       .freqsel_mask = 0xf0,
+       .modes = 0xa0,
+       .div1_mask = 0x7f,
+       .idlest_mask = 0x1,
+       .auto_recal_bit = 0x3,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x7,
+       .max_multiplier = 0x7ff,
+       .enable_mask = 0x7,
+       .mult_mask = 0x7ff00,
+       .recal_st_bit = 0x7,
+       .autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll1_ck = {
+       .name = "dpll1_ck",
+       .clkdm_name = "dpll1_clkdm",
+       .type = TI_CLK_DPLL,
+       .data = &dpll1_ck_data,
+};
+
+static struct ti_clk_fixed secure_32k_fck_data = {
+       .frequency = 32768,
+};
+
+static struct ti_clk secure_32k_fck = {
+       .name = "secure_32k_fck",
+       .type = TI_CLK_FIXED,
+       .data = &secure_32k_fck_data,
+};
+
+static struct ti_clk_gate gpio5_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 16,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio5_ick = {
+       .name = "gpio5_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio5_ick_data,
+};
+
+static struct ti_clk_divider dpll4_m4_ck_data = {
+       .parent = "dpll4_ck",
+       .max_div = 32,
+       .reg = 0xe40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m4_ck = {
+       .name = "dpll4_m4_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll4_m4_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m4x2_mul_ck_data = {
+       .parent = "dpll4_m4_ck",
+       .div = 1,
+       .mult = 2,
+       .flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dpll4_m4x2_mul_ck = {
+       .name = "dpll4_m4x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_m4x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m4x2_ck_data = {
+       .parent = "dpll4_m4x2_mul_ck",
+       .bit_shift = 0x1d,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_RATE_PARENT | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m4x2_ck = {
+       .name = "dpll4_m4x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m4x2_ck_data,
+};
+
+static struct ti_clk_gate dss1_alwon_fck_3430es2_data = {
+       .parent = "dpll4_m4x2_ck",
+       .bit_shift = 0,
+       .reg = 0xe00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_DSS | CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dss1_alwon_fck_3430es2 = {
+       .name = "dss1_alwon_fck",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss1_alwon_fck_3430es2_data,
+};
+
+static struct ti_clk_gate uart3_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 11,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart3_ick = {
+       .name = "uart3_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart3_ick_data,
+};
+
+static struct ti_clk_divider dpll4_m3_ck_data = {
+       .parent = "dpll4_ck",
+       .bit_shift = 8,
+       .max_div = 32,
+       .reg = 0xe40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m3_ck = {
+       .name = "dpll4_m3_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll4_m3_ck_data,
+};
+
+static struct ti_clk_gate mcbsp3_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 1,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp3_ick = {
+       .name = "mcbsp3_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcbsp3_ick_data,
+};
+
+static struct ti_clk_gate gpio3_dbck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 14,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio3_dbck = {
+       .name = "gpio3_dbck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio3_dbck_data,
+};
+
+static struct ti_clk_gate fac_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 8,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk fac_ick = {
+       .name = "fac_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &fac_ick_data,
+};
+
+static struct ti_clk_gate clkout2_src_gate_ck_data = {
+       .parent = "core_ck",
+       .bit_shift = 7,
+       .reg = 0xd70,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_NO_WAIT,
+};
+
+static struct ti_clk_fixed_factor dpll4_m3x2_mul_ck_data = {
+       .parent = "dpll4_m3_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll4_m3x2_mul_ck = {
+       .name = "dpll4_m3x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_m3x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m3x2_ck_data = {
+       .parent = "dpll4_m3x2_mul_ck",
+       .bit_shift = 0x1c,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m3x2_ck = {
+       .name = "dpll4_m3x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m3x2_ck_data,
+};
+
+static const char *omap_54m_fck_parents[] = {
+       "dpll4_m3x2_ck",
+       "sys_altclk",
+};
+
+static struct ti_clk_mux omap_54m_fck_data = {
+       .bit_shift = 5,
+       .num_parents = ARRAY_SIZE(omap_54m_fck_parents),
+       .reg = 0xd40,
+       .module = TI_CLKM_CM,
+       .parents = omap_54m_fck_parents,
+};
+
+static struct ti_clk omap_54m_fck = {
+       .name = "omap_54m_fck",
+       .type = TI_CLK_MUX,
+       .data = &omap_54m_fck_data,
+};
+
+static const char *clkout2_src_mux_ck_parents[] = {
+       "core_ck",
+       "sys_ck",
+       "cm_96m_fck",
+       "omap_54m_fck",
+};
+
+static struct ti_clk_mux clkout2_src_mux_ck_data = {
+       .num_parents = ARRAY_SIZE(clkout2_src_mux_ck_parents),
+       .reg = 0xd70,
+       .module = TI_CLKM_CM,
+       .parents = clkout2_src_mux_ck_parents,
+};
+
+static struct ti_clk_composite clkout2_src_ck_data = {
+       .mux = &clkout2_src_mux_ck_data,
+       .gate = &clkout2_src_gate_ck_data,
+};
+
+static struct ti_clk clkout2_src_ck = {
+       .name = "clkout2_src_ck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &clkout2_src_ck_data,
+};
+
+static struct ti_clk_gate i2c1_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 15,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk i2c1_fck = {
+       .name = "i2c1_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c1_fck_data,
+};
+
+static struct ti_clk_gate wdt3_fck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 12,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk wdt3_fck = {
+       .name = "wdt3_fck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &wdt3_fck_data,
+};
+
+static struct ti_clk_gate gpt7_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 8,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static const char *gpt7_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt7_mux_fck_data = {
+       .bit_shift = 5,
+       .num_parents = ARRAY_SIZE(gpt7_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt7_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt7_fck_data = {
+       .mux = &gpt7_mux_fck_data,
+       .gate = &gpt7_gate_fck_data,
+};
+
+static struct ti_clk gpt7_fck = {
+       .name = "gpt7_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt7_fck_data,
+};
+
+static struct ti_clk_gate usb_l4_gate_ick_data = {
+       .parent = "l4_ick",
+       .bit_shift = 5,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INTERFACE,
+};
+
+static struct ti_clk_divider usb_l4_div_ick_data = {
+       .parent = "l4_ick",
+       .bit_shift = 4,
+       .max_div = 1,
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk_composite usb_l4_ick_data = {
+       .gate = &usb_l4_gate_ick_data,
+       .divider = &usb_l4_div_ick_data,
+};
+
+static struct ti_clk usb_l4_ick = {
+       .name = "usb_l4_ick",
+       .type = TI_CLK_COMPOSITE,
+       .data = &usb_l4_ick_data,
+};
+
+static struct ti_clk_gate uart4_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 18,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart4_ick = {
+       .name = "uart4_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart4_ick_data,
+};
+
+static struct ti_clk_fixed dummy_ck_data = {
+       .frequency = 0,
+};
+
+static struct ti_clk dummy_ck = {
+       .name = "dummy_ck",
+       .type = TI_CLK_FIXED,
+       .data = &dummy_ck_data,
+};
+
+static const char *gpt3_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt3_mux_fck_data = {
+       .bit_shift = 1,
+       .num_parents = ARRAY_SIZE(gpt3_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt3_mux_fck_parents,
+};
+
+static struct ti_clk_gate gpt9_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 10,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt9_ick = {
+       .name = "gpt9_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt9_ick_data,
+};
+
+static struct ti_clk_gate gpt10_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 11,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate dss_ick_3430es1_data = {
+       .parent = "l4_ick",
+       .bit_shift = 0,
+       .reg = 0xe10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk dss_ick_3430es1 = {
+       .name = "dss_ick",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss_ick_3430es1_data,
+};
+
+static struct ti_clk_gate gpt11_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 12,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt11_ick = {
+       .name = "gpt11_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt11_ick_data,
+};
+
+static struct ti_clk_divider dpll2_fck_data = {
+       .parent = "core_ck",
+       .bit_shift = 19,
+       .max_div = 7,
+       .reg = 0x40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll2_fck = {
+       .name = "dpll2_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll2_fck_data,
+};
+
+static struct ti_clk_gate uart1_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 13,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart1_fck = {
+       .name = "uart1_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart1_fck_data,
+};
+
+static struct ti_clk_gate hsotgusb_ick_3430es1_data = {
+       .parent = "core_l3_ick",
+       .bit_shift = 4,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk hsotgusb_ick_3430es1 = {
+       .name = "hsotgusb_ick_3430es1",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hsotgusb_ick_3430es1_data,
+};
+
+static struct ti_clk_gate gpio2_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 13,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio2_ick = {
+       .name = "gpio2_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio2_ick_data,
+};
+
+static struct ti_clk_gate mmchs1_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 24,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mmchs1_ick = {
+       .name = "mmchs1_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs1_ick_data,
+};
+
+static struct ti_clk_gate modem_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 31,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk modem_fck = {
+       .name = "modem_fck",
+       .clkdm_name = "d2d_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &modem_fck_data,
+};
+
+static struct ti_clk_gate mcbsp4_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 2,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp4_ick = {
+       .name = "mcbsp4_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcbsp4_ick_data,
+};
+
+static struct ti_clk_gate gpio1_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 3,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio1_ick = {
+       .name = "gpio1_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio1_ick_data,
+};
+
+static const char *gpt6_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt6_mux_fck_data = {
+       .bit_shift = 4,
+       .num_parents = ARRAY_SIZE(gpt6_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt6_mux_fck_parents,
+};
+
+static struct ti_clk_fixed_factor dpll1_x2_ck_data = {
+       .parent = "dpll1_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll1_x2_ck = {
+       .name = "dpll1_x2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll1_x2_ck_data,
+};
+
+static struct ti_clk_divider dpll1_x2m2_ck_data = {
+       .parent = "dpll1_x2_ck",
+       .max_div = 31,
+       .reg = 0x944,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll1_x2m2_ck = {
+       .name = "dpll1_x2m2_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll1_x2m2_ck_data,
+};
+
+static struct ti_clk_fixed_factor mpu_ck_data = {
+       .parent = "dpll1_x2m2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk mpu_ck = {
+       .name = "mpu_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &mpu_ck_data,
+};
+
+static struct ti_clk_divider arm_fck_data = {
+       .parent = "mpu_ck",
+       .max_div = 2,
+       .reg = 0x924,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk arm_fck = {
+       .name = "arm_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &arm_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_d3_ck_data = {
+       .parent = "core_ck",
+       .div = 3,
+       .mult = 1,
+};
+
+static struct ti_clk core_d3_ck = {
+       .name = "core_d3_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_d3_ck_data,
+};
+
+static struct ti_clk_gate gpt11_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 12,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+};
+
+static const char *gpt11_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt11_mux_fck_data = {
+       .bit_shift = 7,
+       .num_parents = ARRAY_SIZE(gpt11_mux_fck_parents),
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+       .parents = gpt11_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt11_fck_data = {
+       .mux = &gpt11_mux_fck_data,
+       .gate = &gpt11_gate_fck_data,
+};
+
+static struct ti_clk gpt11_fck = {
+       .name = "gpt11_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt11_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_d6_ck_data = {
+       .parent = "core_ck",
+       .div = 6,
+       .mult = 1,
+};
+
+static struct ti_clk core_d6_ck = {
+       .name = "core_d6_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_d6_ck_data,
+};
+
+static struct ti_clk_gate uart4_fck_am35xx_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 23,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart4_fck_am35xx = {
+       .name = "uart4_fck_am35xx",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart4_fck_am35xx_data,
+};
+
+static struct ti_clk_gate dpll3_m3x2_ck_data = {
+       .parent = "dpll3_m3x2_mul_ck",
+       .bit_shift = 0xc,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll3_m3x2_ck = {
+       .name = "dpll3_m3x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll3_m3x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor emu_core_alwon_ck_data = {
+       .parent = "dpll3_m3x2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk emu_core_alwon_ck = {
+       .name = "emu_core_alwon_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &emu_core_alwon_ck_data,
+};
+
+static struct ti_clk_divider dpll4_m6_ck_data = {
+       .parent = "dpll4_ck",
+       .bit_shift = 24,
+       .max_div = 63,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll4_m6_ck = {
+       .name = "dpll4_m6_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll4_m6_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_m6x2_mul_ck_data = {
+       .parent = "dpll4_m6_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll4_m6x2_mul_ck = {
+       .name = "dpll4_m6x2_mul_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_m6x2_mul_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m6x2_ck_data = {
+       .parent = "dpll4_m6x2_mul_ck",
+       .bit_shift = 0x1f,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m6x2_ck = {
+       .name = "dpll4_m6x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m6x2_ck_data,
+};
+
+static struct ti_clk_fixed_factor emu_per_alwon_ck_data = {
+       .parent = "dpll4_m6x2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk emu_per_alwon_ck = {
+       .name = "emu_per_alwon_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &emu_per_alwon_ck_data,
+};
+
+static struct ti_clk_fixed_factor emu_mpu_alwon_ck_data = {
+       .parent = "mpu_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk emu_mpu_alwon_ck = {
+       .name = "emu_mpu_alwon_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &emu_mpu_alwon_ck_data,
+};
+
+static const char *emu_src_mux_ck_parents[] = {
+       "sys_ck",
+       "emu_core_alwon_ck",
+       "emu_per_alwon_ck",
+       "emu_mpu_alwon_ck",
+};
+
+static struct ti_clk_mux emu_src_mux_ck_data = {
+       .num_parents = ARRAY_SIZE(emu_src_mux_ck_parents),
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .parents = emu_src_mux_ck_parents,
+};
+
+static struct ti_clk emu_src_mux_ck = {
+       .name = "emu_src_mux_ck",
+       .type = TI_CLK_MUX,
+       .data = &emu_src_mux_ck_data,
+};
+
+static struct ti_clk_gate emu_src_ck_data = {
+       .parent = "emu_src_mux_ck",
+       .flags = CLKF_CLKDM,
+};
+
+static struct ti_clk emu_src_ck = {
+       .name = "emu_src_ck",
+       .clkdm_name = "emu_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &emu_src_ck_data,
+};
+
+static struct ti_clk_divider atclk_fck_data = {
+       .parent = "emu_src_ck",
+       .bit_shift = 4,
+       .max_div = 3,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk atclk_fck = {
+       .name = "atclk_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &atclk_fck_data,
+};
+
+static struct ti_clk_gate ipss_ick_data = {
+       .parent = "core_l3_ick",
+       .bit_shift = 4,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_AM35XX | CLKF_INTERFACE,
+};
+
+static struct ti_clk ipss_ick = {
+       .name = "ipss_ick",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &ipss_ick_data,
+};
+
+static struct ti_clk_gate emac_ick_data = {
+       .parent = "ipss_ick",
+       .bit_shift = 1,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+       .flags = CLKF_AM35XX,
+};
+
+static struct ti_clk emac_ick = {
+       .name = "emac_ick",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &emac_ick_data,
+};
+
+static struct ti_clk_gate vpfe_ick_data = {
+       .parent = "ipss_ick",
+       .bit_shift = 2,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+       .flags = CLKF_AM35XX,
+};
+
+static struct ti_clk vpfe_ick = {
+       .name = "vpfe_ick",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &vpfe_ick_data,
+};
+
+static const char *dpll2_ck_parents[] = {
+       "sys_ck",
+       "dpll2_fck",
+};
+
+static struct ti_clk_dpll dpll2_ck_data = {
+       .num_parents = ARRAY_SIZE(dpll2_ck_parents),
+       .control_reg = 0x4,
+       .idlest_reg = 0x24,
+       .mult_div1_reg = 0x40,
+       .autoidle_reg = 0x34,
+       .module = TI_CLKM_CM,
+       .parents = dpll2_ck_parents,
+       .freqsel_mask = 0xf0,
+       .modes = 0xa2,
+       .div1_mask = 0x7f,
+       .idlest_mask = 0x1,
+       .auto_recal_bit = 0x3,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x8,
+       .max_multiplier = 0x7ff,
+       .enable_mask = 0x7,
+       .mult_mask = 0x7ff00,
+       .recal_st_bit = 0x8,
+       .autoidle_mask = 0x7,
+};
+
+static struct ti_clk dpll2_ck = {
+       .name = "dpll2_ck",
+       .clkdm_name = "dpll2_clkdm",
+       .type = TI_CLK_DPLL,
+       .data = &dpll2_ck_data,
+};
+
+static struct ti_clk_divider dpll2_m2_ck_data = {
+       .parent = "dpll2_ck",
+       .max_div = 31,
+       .reg = 0x44,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk dpll2_m2_ck = {
+       .name = "dpll2_m2_ck",
+       .type = TI_CLK_DIVIDER,
+       .data = &dpll2_m2_ck_data,
+};
+
+static const char *mcbsp4_mux_fck_parents[] = {
+       "per_96m_fck",
+       "mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp4_mux_fck_data = {
+       .bit_shift = 2,
+       .num_parents = ARRAY_SIZE(mcbsp4_mux_fck_parents),
+       .reg = 0x2d8,
+       .module = TI_CLKM_SCRM,
+       .parents = mcbsp4_mux_fck_parents,
+};
+
+static const char *mcbsp1_mux_fck_parents[] = {
+       "core_96m_fck",
+       "mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp1_mux_fck_data = {
+       .bit_shift = 2,
+       .num_parents = ARRAY_SIZE(mcbsp1_mux_fck_parents),
+       .reg = 0x274,
+       .module = TI_CLKM_SCRM,
+       .parents = mcbsp1_mux_fck_parents,
+};
+
+static struct ti_clk_gate gpt8_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 9,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate gpt8_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 9,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt8_ick = {
+       .name = "gpt8_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt8_ick_data,
+};
+
+static const char *gpt10_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt10_mux_fck_data = {
+       .bit_shift = 6,
+       .num_parents = ARRAY_SIZE(gpt10_mux_fck_parents),
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+       .parents = gpt10_mux_fck_parents,
+};
+
+static struct ti_clk_gate mmchs3_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 30,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mmchs3_ick = {
+       .name = "mmchs3_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs3_ick_data,
+};
+
+static struct ti_clk_gate gpio3_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 14,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio3_ick = {
+       .name = "gpio3_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio3_ick_data,
+};
+
+static const char *traceclk_src_fck_parents[] = {
+       "sys_ck",
+       "emu_core_alwon_ck",
+       "emu_per_alwon_ck",
+       "emu_mpu_alwon_ck",
+};
+
+static struct ti_clk_mux traceclk_src_fck_data = {
+       .bit_shift = 2,
+       .num_parents = ARRAY_SIZE(traceclk_src_fck_parents),
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .parents = traceclk_src_fck_parents,
+};
+
+static struct ti_clk traceclk_src_fck = {
+       .name = "traceclk_src_fck",
+       .type = TI_CLK_MUX,
+       .data = &traceclk_src_fck_data,
+};
+
+static struct ti_clk_divider traceclk_fck_data = {
+       .parent = "traceclk_src_fck",
+       .bit_shift = 11,
+       .max_div = 7,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk traceclk_fck = {
+       .name = "traceclk_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &traceclk_fck_data,
+};
+
+static struct ti_clk_gate mcbsp5_gate_fck_data = {
+       .parent = "mcbsp_clks",
+       .bit_shift = 10,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate sad2d_ick_data = {
+       .parent = "l3_ick",
+       .bit_shift = 3,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk sad2d_ick = {
+       .name = "sad2d_ick",
+       .clkdm_name = "d2d_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sad2d_ick_data,
+};
+
+static const char *gpt1_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt1_mux_fck_data = {
+       .num_parents = ARRAY_SIZE(gpt1_mux_fck_parents),
+       .reg = 0xc40,
+       .module = TI_CLKM_CM,
+       .parents = gpt1_mux_fck_parents,
+};
+
+static struct ti_clk_gate hecc_ck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 3,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+       .flags = CLKF_AM35XX,
+};
+
+static struct ti_clk hecc_ck = {
+       .name = "hecc_ck",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hecc_ck_data,
+};
+
+static struct ti_clk_gate gpt1_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 0,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite gpt1_fck_data = {
+       .mux = &gpt1_mux_fck_data,
+       .gate = &gpt1_gate_fck_data,
+};
+
+static struct ti_clk gpt1_fck = {
+       .name = "gpt1_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt1_fck_data,
+};
+
+static struct ti_clk_gate dpll4_m2x2_ck_omap36xx_data = {
+       .parent = "dpll4_m2x2_mul_ck",
+       .bit_shift = 0x1b,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m2x2_ck_omap36xx = {
+       .name = "dpll4_m2x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m2x2_ck_omap36xx_data,
+       .patch = &dpll4_m2x2_ck,
+};
+
+static struct ti_clk_divider gfx_l3_fck_data = {
+       .parent = "l3_ick",
+       .max_div = 7,
+       .reg = 0xb40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk gfx_l3_fck = {
+       .name = "gfx_l3_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &gfx_l3_fck_data,
+};
+
+static struct ti_clk_gate gfx_cg1_ck_data = {
+       .parent = "gfx_l3_fck",
+       .bit_shift = 1,
+       .reg = 0xb00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk gfx_cg1_ck = {
+       .name = "gfx_cg1_ck",
+       .clkdm_name = "gfx_3430es1_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gfx_cg1_ck_data,
+};
+
+static struct ti_clk_gate mailboxes_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 7,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mailboxes_ick = {
+       .name = "mailboxes_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mailboxes_ick_data,
+};
+
+static struct ti_clk_gate sha11_ick_data = {
+       .parent = "security_l4_ick2",
+       .bit_shift = 1,
+       .reg = 0xa14,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk sha11_ick = {
+       .name = "sha11_ick",
+       .type = TI_CLK_GATE,
+       .data = &sha11_ick_data,
+};
+
+static struct ti_clk_gate hsotgusb_ick_am35xx_data = {
+       .parent = "ipss_ick",
+       .bit_shift = 0,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+       .flags = CLKF_AM35XX,
+};
+
+static struct ti_clk hsotgusb_ick_am35xx = {
+       .name = "hsotgusb_ick_am35xx",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hsotgusb_ick_am35xx_data,
+};
+
+static struct ti_clk_gate mmchs3_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 30,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mmchs3_fck = {
+       .name = "mmchs3_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs3_fck_data,
+};
+
+static struct ti_clk_divider pclk_fck_data = {
+       .parent = "emu_src_ck",
+       .bit_shift = 8,
+       .max_div = 7,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk pclk_fck = {
+       .name = "pclk_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &pclk_fck_data,
+};
+
+static const char *dpll4_ck_omap36xx_parents[] = {
+       "sys_ck",
+       "sys_ck",
+};
+
+static struct ti_clk_dpll dpll4_ck_omap36xx_data = {
+       .num_parents = ARRAY_SIZE(dpll4_ck_omap36xx_parents),
+       .control_reg = 0xd00,
+       .idlest_reg = 0xd20,
+       .mult_div1_reg = 0xd44,
+       .autoidle_reg = 0xd30,
+       .module = TI_CLKM_CM,
+       .parents = dpll4_ck_omap36xx_parents,
+       .modes = 0x82,
+       .div1_mask = 0x7f,
+       .idlest_mask = 0x2,
+       .auto_recal_bit = 0x13,
+       .max_divider = 0x80,
+       .min_divider = 0x1,
+       .recal_en_bit = 0x6,
+       .max_multiplier = 0xfff,
+       .enable_mask = 0x70000,
+       .mult_mask = 0xfff00,
+       .recal_st_bit = 0x6,
+       .autoidle_mask = 0x38,
+       .sddiv_mask = 0xff000000,
+       .dco_mask = 0xe00000,
+       .flags = CLKF_PER | CLKF_J_TYPE,
+};
+
+static struct ti_clk dpll4_ck_omap36xx = {
+       .name = "dpll4_ck",
+       .type = TI_CLK_DPLL,
+       .data = &dpll4_ck_omap36xx_data,
+       .patch = &dpll4_ck,
+};
+
+static struct ti_clk_gate uart3_fck_data = {
+       .parent = "per_48m_fck",
+       .bit_shift = 11,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk uart3_fck = {
+       .name = "uart3_fck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart3_fck_data,
+};
+
+static struct ti_clk_fixed_factor wkup_32k_fck_data = {
+       .parent = "omap_32k_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk wkup_32k_fck = {
+       .name = "wkup_32k_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &wkup_32k_fck_data,
+};
+
+static struct ti_clk_gate sys_clkout1_data = {
+       .parent = "osc_sys_ck",
+       .bit_shift = 7,
+       .reg = 0xd70,
+       .module = TI_CLKM_PRM,
+};
+
+static struct ti_clk sys_clkout1 = {
+       .name = "sys_clkout1",
+       .type = TI_CLK_GATE,
+       .data = &sys_clkout1_data,
+};
+
+static struct ti_clk_fixed_factor gpmc_fck_data = {
+       .parent = "core_l3_ick",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk gpmc_fck = {
+       .name = "gpmc_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &gpmc_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d20_ck_data = {
+       .parent = "dpll5_m2_ck",
+       .div = 20,
+       .mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d20_ck = {
+       .name = "dpll5_m2_d20_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll5_m2_d20_ck_data,
+};
+
+static struct ti_clk_gate dpll4_m5x2_ck_omap36xx_data = {
+       .parent = "dpll4_m5x2_mul_ck",
+       .bit_shift = 0x1e,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSDIV | CLKF_SET_RATE_PARENT | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m5x2_ck_omap36xx = {
+       .name = "dpll4_m5x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m5x2_ck_omap36xx_data,
+       .patch = &dpll4_m5x2_ck,
+};
+
+static struct ti_clk_gate ssi_ssr_gate_fck_3430es2_data = {
+       .parent = "corex2_fck",
+       .bit_shift = 0,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_NO_WAIT,
+};
+
+static struct ti_clk_gate uart1_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 13,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart1_ick = {
+       .name = "uart1_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart1_ick_data,
+};
+
+static struct ti_clk_gate iva2_ck_data = {
+       .parent = "dpll2_m2_ck",
+       .bit_shift = 0,
+       .reg = 0x0,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk iva2_ck = {
+       .name = "iva2_ck",
+       .clkdm_name = "iva2_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &iva2_ck_data,
+};
+
+static struct ti_clk_gate pka_ick_data = {
+       .parent = "security_l3_ick",
+       .bit_shift = 4,
+       .reg = 0xa14,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk pka_ick = {
+       .name = "pka_ick",
+       .type = TI_CLK_GATE,
+       .data = &pka_ick_data,
+};
+
+static struct ti_clk_gate gpt12_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 1,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt12_ick = {
+       .name = "gpt12_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt12_ick_data,
+};
+
+static const char *mcbsp5_mux_fck_parents[] = {
+       "core_96m_fck",
+       "mcbsp_clks",
+};
+
+static struct ti_clk_mux mcbsp5_mux_fck_data = {
+       .bit_shift = 4,
+       .num_parents = ARRAY_SIZE(mcbsp5_mux_fck_parents),
+       .reg = 0x2d8,
+       .module = TI_CLKM_SCRM,
+       .parents = mcbsp5_mux_fck_parents,
+};
+
+static struct ti_clk_composite mcbsp5_fck_data = {
+       .mux = &mcbsp5_mux_fck_data,
+       .gate = &mcbsp5_gate_fck_data,
+};
+
+static struct ti_clk mcbsp5_fck = {
+       .name = "mcbsp5_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &mcbsp5_fck_data,
+};
+
+static struct ti_clk_gate usbhost_48m_fck_data = {
+       .parent = "omap_48m_fck",
+       .bit_shift = 0,
+       .reg = 0x1400,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_DSS,
+};
+
+static struct ti_clk usbhost_48m_fck = {
+       .name = "usbhost_48m_fck",
+       .clkdm_name = "usbhost_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usbhost_48m_fck_data,
+};
+
+static struct ti_clk_gate des1_ick_data = {
+       .parent = "security_l4_ick2",
+       .bit_shift = 0,
+       .reg = 0xa14,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk des1_ick = {
+       .name = "des1_ick",
+       .type = TI_CLK_GATE,
+       .data = &des1_ick_data,
+};
+
+static struct ti_clk_gate sgx_gate_fck_data = {
+       .parent = "core_ck",
+       .bit_shift = 1,
+       .reg = 0xb00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed_factor core_d4_ck_data = {
+       .parent = "core_ck",
+       .div = 4,
+       .mult = 1,
+};
+
+static struct ti_clk core_d4_ck = {
+       .name = "core_d4_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_d4_ck_data,
+};
+
+static struct ti_clk_fixed_factor omap_192m_alwon_fck_data = {
+       .parent = "dpll4_m2x2_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk omap_192m_alwon_fck = {
+       .name = "omap_192m_alwon_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_192m_alwon_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_d2_ck_data = {
+       .parent = "core_ck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk core_d2_ck = {
+       .name = "core_d2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_d2_ck_data,
+};
+
+static struct ti_clk_fixed_factor corex2_d3_fck_data = {
+       .parent = "corex2_fck",
+       .div = 3,
+       .mult = 1,
+};
+
+static struct ti_clk corex2_d3_fck = {
+       .name = "corex2_d3_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &corex2_d3_fck_data,
+};
+
+static struct ti_clk_fixed_factor corex2_d5_fck_data = {
+       .parent = "corex2_fck",
+       .div = 5,
+       .mult = 1,
+};
+
+static struct ti_clk corex2_d5_fck = {
+       .name = "corex2_d5_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &corex2_d5_fck_data,
+};
+
+static const char *sgx_mux_fck_parents[] = {
+       "core_d3_ck",
+       "core_d4_ck",
+       "core_d6_ck",
+       "cm_96m_fck",
+       "omap_192m_alwon_fck",
+       "core_d2_ck",
+       "corex2_d3_fck",
+       "corex2_d5_fck",
+};
+
+static struct ti_clk_mux sgx_mux_fck_data = {
+       .num_parents = ARRAY_SIZE(sgx_mux_fck_parents),
+       .reg = 0xb40,
+       .module = TI_CLKM_CM,
+       .parents = sgx_mux_fck_parents,
+};
+
+static struct ti_clk_composite sgx_fck_data = {
+       .mux = &sgx_mux_fck_data,
+       .gate = &sgx_gate_fck_data,
+};
+
+static struct ti_clk sgx_fck = {
+       .name = "sgx_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &sgx_fck_data,
+};
+
+static struct ti_clk_gate mcspi1_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 18,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi1_fck = {
+       .name = "mcspi1_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi1_fck_data,
+};
+
+static struct ti_clk_gate mmchs2_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 25,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mmchs2_fck = {
+       .name = "mmchs2_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs2_fck_data,
+};
+
+static struct ti_clk_gate mcspi2_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 19,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mcspi2_fck = {
+       .name = "mcspi2_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi2_fck_data,
+};
+
+static struct ti_clk_gate vpfe_fck_data = {
+       .parent = "pclk_ck",
+       .bit_shift = 10,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+};
+
+static struct ti_clk vpfe_fck = {
+       .name = "vpfe_fck",
+       .type = TI_CLK_GATE,
+       .data = &vpfe_fck_data,
+};
+
+static struct ti_clk_gate gpt4_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 5,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate mcbsp1_gate_fck_data = {
+       .parent = "mcbsp_clks",
+       .bit_shift = 9,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_gate gpt5_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 6,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static const char *gpt5_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt5_mux_fck_data = {
+       .bit_shift = 3,
+       .num_parents = ARRAY_SIZE(gpt5_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt5_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt5_fck_data = {
+       .mux = &gpt5_mux_fck_data,
+       .gate = &gpt5_gate_fck_data,
+};
+
+static struct ti_clk gpt5_fck = {
+       .name = "gpt5_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt5_fck_data,
+};
+
+static struct ti_clk_gate ts_fck_data = {
+       .parent = "omap_32k_fck",
+       .bit_shift = 1,
+       .reg = 0xa08,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk ts_fck = {
+       .name = "ts_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &ts_fck_data,
+};
+
+static struct ti_clk_fixed_factor wdt1_fck_data = {
+       .parent = "secure_32k_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk wdt1_fck = {
+       .name = "wdt1_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &wdt1_fck_data,
+};
+
+static struct ti_clk_gate dpll4_m6x2_ck_omap36xx_data = {
+       .parent = "dpll4_m6x2_mul_ck",
+       .bit_shift = 0x1f,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m6x2_ck_omap36xx = {
+       .name = "dpll4_m6x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m6x2_ck_omap36xx_data,
+       .patch = &dpll4_m6x2_ck,
+};
+
+static const char *gpt4_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt4_mux_fck_data = {
+       .bit_shift = 2,
+       .num_parents = ARRAY_SIZE(gpt4_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt4_mux_fck_parents,
+};
+
+static struct ti_clk_gate usbhost_ick_data = {
+       .parent = "l4_ick",
+       .bit_shift = 0,
+       .reg = 0x1410,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_DSS | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk usbhost_ick = {
+       .name = "usbhost_ick",
+       .clkdm_name = "usbhost_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usbhost_ick_data,
+};
+
+static struct ti_clk_gate mcbsp2_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 0,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp2_ick = {
+       .name = "mcbsp2_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcbsp2_ick_data,
+};
+
+static struct ti_clk_gate omapctrl_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 6,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk omapctrl_ick = {
+       .name = "omapctrl_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &omapctrl_ick_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d4_fck_data = {
+       .parent = "omap_96m_fck",
+       .div = 4,
+       .mult = 1,
+};
+
+static struct ti_clk omap_96m_d4_fck = {
+       .name = "omap_96m_d4_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_96m_d4_fck_data,
+};
+
+static struct ti_clk_gate gpt6_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 7,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt6_ick = {
+       .name = "gpt6_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt6_ick_data,
+};
+
+static struct ti_clk_gate dpll3_m3x2_ck_omap36xx_data = {
+       .parent = "dpll3_m3x2_mul_ck",
+       .bit_shift = 0xc,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll3_m3x2_ck_omap36xx = {
+       .name = "dpll3_m3x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll3_m3x2_ck_omap36xx_data,
+       .patch = &dpll3_m3x2_ck,
+};
+
+static struct ti_clk_gate i2c3_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 17,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk i2c3_ick = {
+       .name = "i2c3_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c3_ick_data,
+};
+
+static struct ti_clk_gate gpio6_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 17,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio6_ick = {
+       .name = "gpio6_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio6_ick_data,
+};
+
+static struct ti_clk_gate mspro_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 23,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mspro_ick = {
+       .name = "mspro_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mspro_ick_data,
+};
+
+static struct ti_clk_composite mcbsp1_fck_data = {
+       .mux = &mcbsp1_mux_fck_data,
+       .gate = &mcbsp1_gate_fck_data,
+};
+
+static struct ti_clk mcbsp1_fck = {
+       .name = "mcbsp1_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &mcbsp1_fck_data,
+};
+
+static struct ti_clk_gate gpt3_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 4,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_fixed rmii_ck_data = {
+       .frequency = 50000000,
+};
+
+static struct ti_clk rmii_ck = {
+       .name = "rmii_ck",
+       .type = TI_CLK_FIXED,
+       .data = &rmii_ck_data,
+};
+
+static struct ti_clk_gate gpt6_gate_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 7,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite gpt6_fck_data = {
+       .mux = &gpt6_mux_fck_data,
+       .gate = &gpt6_gate_fck_data,
+};
+
+static struct ti_clk gpt6_fck = {
+       .name = "gpt6_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt6_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d4_ck_data = {
+       .parent = "dpll5_m2_ck",
+       .div = 4,
+       .mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d4_ck = {
+       .name = "dpll5_m2_d4_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll5_m2_d4_ck_data,
+};
+
+static struct ti_clk_fixed_factor sys_d2_ck_data = {
+       .parent = "sys_ck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk sys_d2_ck = {
+       .name = "sys_d2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &sys_d2_ck_data,
+};
+
+static struct ti_clk_fixed_factor omap_96m_d2_fck_data = {
+       .parent = "omap_96m_fck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk omap_96m_d2_fck = {
+       .name = "omap_96m_d2_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_96m_d2_fck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d8_ck_data = {
+       .parent = "dpll5_m2_ck",
+       .div = 8,
+       .mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d8_ck = {
+       .name = "dpll5_m2_d8_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll5_m2_d8_ck_data,
+};
+
+static struct ti_clk_fixed_factor dpll5_m2_d16_ck_data = {
+       .parent = "dpll5_m2_ck",
+       .div = 16,
+       .mult = 1,
+};
+
+static struct ti_clk dpll5_m2_d16_ck = {
+       .name = "dpll5_m2_d16_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll5_m2_d16_ck_data,
+};
+
+static const char *usim_mux_fck_parents[] = {
+       "sys_ck",
+       "sys_d2_ck",
+       "omap_96m_d2_fck",
+       "omap_96m_d4_fck",
+       "omap_96m_d8_fck",
+       "omap_96m_d10_fck",
+       "dpll5_m2_d4_ck",
+       "dpll5_m2_d8_ck",
+       "dpll5_m2_d16_ck",
+       "dpll5_m2_d20_ck",
+};
+
+static struct ti_clk_mux usim_mux_fck_data = {
+       .bit_shift = 3,
+       .num_parents = ARRAY_SIZE(usim_mux_fck_parents),
+       .reg = 0xc40,
+       .module = TI_CLKM_CM,
+       .parents = usim_mux_fck_parents,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk_composite usim_fck_data = {
+       .mux = &usim_mux_fck_data,
+       .gate = &usim_gate_fck_data,
+};
+
+static struct ti_clk usim_fck = {
+       .name = "usim_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &usim_fck_data,
+};
+
+static int ssi_ssr_div_fck_3430es2_divs[] = {
+       0,
+       1,
+       2,
+       3,
+       4,
+       0,
+       6,
+       0,
+       8,
+};
+
+static struct ti_clk_divider ssi_ssr_div_fck_3430es2_data = {
+       .num_dividers = ARRAY_SIZE(ssi_ssr_div_fck_3430es2_divs),
+       .parent = "corex2_fck",
+       .bit_shift = 8,
+       .dividers = ssi_ssr_div_fck_3430es2_divs,
+       .reg = 0xa40,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite ssi_ssr_fck_3430es2_data = {
+       .gate = &ssi_ssr_gate_fck_3430es2_data,
+       .divider = &ssi_ssr_div_fck_3430es2_data,
+};
+
+static struct ti_clk ssi_ssr_fck_3430es2 = {
+       .name = "ssi_ssr_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &ssi_ssr_fck_3430es2_data,
+};
+
+static struct ti_clk_gate dss1_alwon_fck_3430es1_data = {
+       .parent = "dpll4_m4x2_ck",
+       .bit_shift = 0,
+       .reg = 0xe00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SET_RATE_PARENT,
+};
+
+static struct ti_clk dss1_alwon_fck_3430es1 = {
+       .name = "dss1_alwon_fck",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss1_alwon_fck_3430es1_data,
+};
+
+static struct ti_clk_gate gpt3_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 4,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt3_ick = {
+       .name = "gpt3_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt3_ick_data,
+};
+
+static struct ti_clk_fixed_factor omap_12m_fck_data = {
+       .parent = "omap_48m_fck",
+       .div = 4,
+       .mult = 1,
+};
+
+static struct ti_clk omap_12m_fck = {
+       .name = "omap_12m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &omap_12m_fck_data,
+};
+
+static struct ti_clk_fixed_factor core_12m_fck_data = {
+       .parent = "omap_12m_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk core_12m_fck = {
+       .name = "core_12m_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &core_12m_fck_data,
+};
+
+static struct ti_clk_gate hdq_fck_data = {
+       .parent = "core_12m_fck",
+       .bit_shift = 22,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk hdq_fck = {
+       .name = "hdq_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hdq_fck_data,
+};
+
+static struct ti_clk_gate usbtll_fck_data = {
+       .parent = "dpll5_m2_ck",
+       .bit_shift = 2,
+       .reg = 0xa08,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk usbtll_fck = {
+       .name = "usbtll_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usbtll_fck_data,
+};
+
+static struct ti_clk_gate hsotgusb_fck_am35xx_data = {
+       .parent = "sys_ck",
+       .bit_shift = 8,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+};
+
+static struct ti_clk hsotgusb_fck_am35xx = {
+       .name = "hsotgusb_fck_am35xx",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hsotgusb_fck_am35xx_data,
+};
+
+static struct ti_clk_gate hsotgusb_ick_3430es2_data = {
+       .parent = "core_l3_ick",
+       .bit_shift = 4,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSOTGUSB | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk hsotgusb_ick_3430es2 = {
+       .name = "hsotgusb_ick_3430es2",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hsotgusb_ick_3430es2_data,
+};
+
+static struct ti_clk_gate gfx_l3_ck_data = {
+       .parent = "l3_ick",
+       .bit_shift = 0,
+       .reg = 0xb10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk gfx_l3_ck = {
+       .name = "gfx_l3_ck",
+       .clkdm_name = "gfx_3430es1_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gfx_l3_ck_data,
+};
+
+static struct ti_clk_fixed_factor gfx_l3_ick_data = {
+       .parent = "gfx_l3_ck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk gfx_l3_ick = {
+       .name = "gfx_l3_ick",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &gfx_l3_ick_data,
+};
+
+static struct ti_clk_gate mcbsp1_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 9,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp1_ick = {
+       .name = "mcbsp1_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcbsp1_ick_data,
+};
+
+static struct ti_clk_fixed_factor gpt12_fck_data = {
+       .parent = "secure_32k_fck",
+       .div = 1,
+       .mult = 1,
+};
+
+static struct ti_clk gpt12_fck = {
+       .name = "gpt12_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &gpt12_fck_data,
+};
+
+static struct ti_clk_gate gfx_cg2_ck_data = {
+       .parent = "gfx_l3_fck",
+       .bit_shift = 2,
+       .reg = 0xb00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk gfx_cg2_ck = {
+       .name = "gfx_cg2_ck",
+       .clkdm_name = "gfx_3430es1_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gfx_cg2_ck_data,
+};
+
+static struct ti_clk_gate i2c2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 16,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk i2c2_ick = {
+       .name = "i2c2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c2_ick_data,
+};
+
+static struct ti_clk_gate gpio4_dbck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 15,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio4_dbck = {
+       .name = "gpio4_dbck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio4_dbck_data,
+};
+
+static struct ti_clk_gate i2c3_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 17,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk i2c3_fck = {
+       .name = "i2c3_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c3_fck_data,
+};
+
+static struct ti_clk_composite gpt3_fck_data = {
+       .mux = &gpt3_mux_fck_data,
+       .gate = &gpt3_gate_fck_data,
+};
+
+static struct ti_clk gpt3_fck = {
+       .name = "gpt3_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt3_fck_data,
+};
+
+static struct ti_clk_gate i2c1_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 15,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk i2c1_ick = {
+       .name = "i2c1_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &i2c1_ick_data,
+};
+
+static struct ti_clk_gate omap_32ksync_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 2,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk omap_32ksync_ick = {
+       .name = "omap_32ksync_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &omap_32ksync_ick_data,
+};
+
+static struct ti_clk_gate aes2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 28,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk aes2_ick = {
+       .name = "aes2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &aes2_ick_data,
+};
+
+static const char *gpt8_mux_fck_parents[] = {
+       "omap_32k_fck",
+       "sys_ck",
+};
+
+static struct ti_clk_mux gpt8_mux_fck_data = {
+       .bit_shift = 6,
+       .num_parents = ARRAY_SIZE(gpt8_mux_fck_parents),
+       .reg = 0x1040,
+       .module = TI_CLKM_CM,
+       .parents = gpt8_mux_fck_parents,
+};
+
+static struct ti_clk_composite gpt8_fck_data = {
+       .mux = &gpt8_mux_fck_data,
+       .gate = &gpt8_gate_fck_data,
+};
+
+static struct ti_clk gpt8_fck = {
+       .name = "gpt8_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt8_fck_data,
+};
+
+static struct ti_clk_gate mcbsp4_gate_fck_data = {
+       .parent = "mcbsp_clks",
+       .bit_shift = 2,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk_composite mcbsp4_fck_data = {
+       .mux = &mcbsp4_mux_fck_data,
+       .gate = &mcbsp4_gate_fck_data,
+};
+
+static struct ti_clk mcbsp4_fck = {
+       .name = "mcbsp4_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &mcbsp4_fck_data,
+};
+
+static struct ti_clk_gate gpio2_dbck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 13,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio2_dbck = {
+       .name = "gpio2_dbck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio2_dbck_data,
+};
+
+static struct ti_clk_gate usbtll_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 2,
+       .reg = 0xa18,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk usbtll_ick = {
+       .name = "usbtll_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &usbtll_ick_data,
+};
+
+static struct ti_clk_gate mcspi4_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 21,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi4_ick = {
+       .name = "mcspi4_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi4_ick_data,
+};
+
+static struct ti_clk_gate dss_96m_fck_data = {
+       .parent = "omap_96m_fck",
+       .bit_shift = 2,
+       .reg = 0xe00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk dss_96m_fck = {
+       .name = "dss_96m_fck",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss_96m_fck_data,
+};
+
+static struct ti_clk_divider rm_ick_data = {
+       .parent = "l4_ick",
+       .bit_shift = 1,
+       .max_div = 3,
+       .reg = 0xc40,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk rm_ick = {
+       .name = "rm_ick",
+       .type = TI_CLK_DIVIDER,
+       .data = &rm_ick_data,
+};
+
+static struct ti_clk_gate hdq_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 22,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk hdq_ick = {
+       .name = "hdq_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &hdq_ick_data,
+};
+
+static struct ti_clk_fixed_factor dpll3_x2_ck_data = {
+       .parent = "dpll3_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll3_x2_ck = {
+       .name = "dpll3_x2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll3_x2_ck_data,
+};
+
+static struct ti_clk_gate mad2d_ick_data = {
+       .parent = "l3_ick",
+       .bit_shift = 3,
+       .reg = 0xa18,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mad2d_ick = {
+       .name = "mad2d_ick",
+       .clkdm_name = "d2d_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mad2d_ick_data,
+};
+
+static struct ti_clk_gate fshostusb_fck_data = {
+       .parent = "core_48m_fck",
+       .bit_shift = 5,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk fshostusb_fck = {
+       .name = "fshostusb_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &fshostusb_fck_data,
+};
+
+static struct ti_clk_gate sr1_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 6,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk sr1_fck = {
+       .name = "sr1_fck",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sr1_fck_data,
+};
+
+static struct ti_clk_gate des2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 26,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk des2_ick = {
+       .name = "des2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &des2_ick_data,
+};
+
+static struct ti_clk_gate sdrc_ick_data = {
+       .parent = "core_l3_ick",
+       .bit_shift = 1,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk sdrc_ick = {
+       .name = "sdrc_ick",
+       .clkdm_name = "core_l3_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sdrc_ick_data,
+};
+
+static struct ti_clk_composite gpt4_fck_data = {
+       .mux = &gpt4_mux_fck_data,
+       .gate = &gpt4_gate_fck_data,
+};
+
+static struct ti_clk gpt4_fck = {
+       .name = "gpt4_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt4_fck_data,
+};
+
+static struct ti_clk_gate dpll4_m3x2_ck_omap36xx_data = {
+       .parent = "dpll4_m3x2_mul_ck",
+       .bit_shift = 0x1c,
+       .reg = 0xd00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
+};
+
+static struct ti_clk dpll4_m3x2_ck_omap36xx = {
+       .name = "dpll4_m3x2_ck",
+       .type = TI_CLK_GATE,
+       .data = &dpll4_m3x2_ck_omap36xx_data,
+       .patch = &dpll4_m3x2_ck,
+};
+
+static struct ti_clk_gate cpefuse_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 0,
+       .reg = 0xa08,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk cpefuse_fck = {
+       .name = "cpefuse_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &cpefuse_fck_data,
+};
+
+static struct ti_clk_gate mcspi3_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 20,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi3_ick = {
+       .name = "mcspi3_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi3_ick_data,
+};
+
+static struct ti_clk_fixed_factor ssi_sst_fck_3430es2_data = {
+       .parent = "ssi_ssr_fck",
+       .div = 2,
+       .mult = 1,
+};
+
+static struct ti_clk ssi_sst_fck_3430es2 = {
+       .name = "ssi_sst_fck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &ssi_sst_fck_3430es2_data,
+};
+
+static struct ti_clk_gate gpio1_dbck_data = {
+       .parent = "wkup_32k_fck",
+       .bit_shift = 3,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio1_dbck = {
+       .name = "gpio1_dbck",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio1_dbck_data,
+};
+
+static struct ti_clk_gate gpt4_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 5,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt4_ick = {
+       .name = "gpt4_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt4_ick_data,
+};
+
+static struct ti_clk_gate gpt2_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 3,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt2_ick = {
+       .name = "gpt2_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt2_ick_data,
+};
+
+static struct ti_clk_gate mmchs1_fck_data = {
+       .parent = "core_96m_fck",
+       .bit_shift = 24,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk mmchs1_fck = {
+       .name = "mmchs1_fck",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mmchs1_fck_data,
+};
+
+static struct ti_clk_fixed dummy_apb_pclk_data = {
+       .frequency = 0x0,
+};
+
+static struct ti_clk dummy_apb_pclk = {
+       .name = "dummy_apb_pclk",
+       .type = TI_CLK_FIXED,
+       .data = &dummy_apb_pclk_data,
+};
+
+static struct ti_clk_gate gpio6_dbck_data = {
+       .parent = "per_32k_alwon_fck",
+       .bit_shift = 17,
+       .reg = 0x1000,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk gpio6_dbck = {
+       .name = "gpio6_dbck",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio6_dbck_data,
+};
+
+static struct ti_clk_gate uart2_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 14,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk uart2_ick = {
+       .name = "uart2_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &uart2_ick_data,
+};
+
+static struct ti_clk_fixed_factor dpll4_x2_ck_data = {
+       .parent = "dpll4_ck",
+       .div = 1,
+       .mult = 2,
+};
+
+static struct ti_clk dpll4_x2_ck = {
+       .name = "dpll4_x2_ck",
+       .type = TI_CLK_FIXED_FACTOR,
+       .data = &dpll4_x2_ck_data,
+};
+
+static struct ti_clk_gate gpt7_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 8,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpt7_ick = {
+       .name = "gpt7_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpt7_ick_data,
+};
+
+static struct ti_clk_gate dss_tv_fck_data = {
+       .parent = "omap_54m_fck",
+       .bit_shift = 2,
+       .reg = 0xe00,
+       .module = TI_CLKM_CM,
+};
+
+static struct ti_clk dss_tv_fck = {
+       .name = "dss_tv_fck",
+       .clkdm_name = "dss_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &dss_tv_fck_data,
+};
+
+static struct ti_clk_gate mcbsp5_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 10,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcbsp5_ick = {
+       .name = "mcbsp5_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcbsp5_ick_data,
+};
+
+static struct ti_clk_gate mcspi1_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 18,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk mcspi1_ick = {
+       .name = "mcspi1_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &mcspi1_ick_data,
+};
+
+static struct ti_clk_gate d2d_26m_fck_data = {
+       .parent = "sys_ck",
+       .bit_shift = 3,
+       .reg = 0xa00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk d2d_26m_fck = {
+       .name = "d2d_26m_fck",
+       .clkdm_name = "d2d_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &d2d_26m_fck_data,
+};
+
+static struct ti_clk_gate wdt3_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 12,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk wdt3_ick = {
+       .name = "wdt3_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &wdt3_ick_data,
+};
+
+static struct ti_clk_divider pclkx2_fck_data = {
+       .parent = "emu_src_ck",
+       .bit_shift = 6,
+       .max_div = 3,
+       .reg = 0x1140,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_STARTS_AT_ONE,
+};
+
+static struct ti_clk pclkx2_fck = {
+       .name = "pclkx2_fck",
+       .type = TI_CLK_DIVIDER,
+       .data = &pclkx2_fck_data,
+};
+
+static struct ti_clk_gate sha12_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 27,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk sha12_ick = {
+       .name = "sha12_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sha12_ick_data,
+};
+
+static struct ti_clk_gate emac_fck_data = {
+       .parent = "rmii_ck",
+       .bit_shift = 9,
+       .reg = 0x59c,
+       .module = TI_CLKM_SCRM,
+};
+
+static struct ti_clk emac_fck = {
+       .name = "emac_fck",
+       .type = TI_CLK_GATE,
+       .data = &emac_fck_data,
+};
+
+static struct ti_clk_composite gpt10_fck_data = {
+       .mux = &gpt10_mux_fck_data,
+       .gate = &gpt10_gate_fck_data,
+};
+
+static struct ti_clk gpt10_fck = {
+       .name = "gpt10_fck",
+       .type = TI_CLK_COMPOSITE,
+       .data = &gpt10_fck_data,
+};
+
+static struct ti_clk_gate wdt2_fck_data = {
+       .parent = "wkup_32k_fck",
+       .bit_shift = 5,
+       .reg = 0xc00,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk wdt2_fck = {
+       .name = "wdt2_fck",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &wdt2_fck_data,
+};
+
+static struct ti_clk_gate cam_ick_data = {
+       .parent = "l4_ick",
+       .bit_shift = 0,
+       .reg = 0xf10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_NO_WAIT | CLKF_INTERFACE,
+};
+
+static struct ti_clk cam_ick = {
+       .name = "cam_ick",
+       .clkdm_name = "cam_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &cam_ick_data,
+};
+
+static struct ti_clk_gate ssi_ick_3430es2_data = {
+       .parent = "ssi_l4_ick",
+       .bit_shift = 0,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_SSI | CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk ssi_ick_3430es2 = {
+       .name = "ssi_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &ssi_ick_3430es2_data,
+};
+
+static struct ti_clk_gate gpio4_ick_data = {
+       .parent = "per_l4_ick",
+       .bit_shift = 15,
+       .reg = 0x1010,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk gpio4_ick = {
+       .name = "gpio4_ick",
+       .clkdm_name = "per_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &gpio4_ick_data,
+};
+
+static struct ti_clk_gate wdt1_ick_data = {
+       .parent = "wkup_l4_ick",
+       .bit_shift = 4,
+       .reg = 0xc10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk wdt1_ick = {
+       .name = "wdt1_ick",
+       .clkdm_name = "wkup_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &wdt1_ick_data,
+};
+
+static struct ti_clk_gate rng_ick_data = {
+       .parent = "security_l4_ick2",
+       .bit_shift = 2,
+       .reg = 0xa14,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk rng_ick = {
+       .name = "rng_ick",
+       .type = TI_CLK_GATE,
+       .data = &rng_ick_data,
+};
+
+static struct ti_clk_gate icr_ick_data = {
+       .parent = "core_l4_ick",
+       .bit_shift = 29,
+       .reg = 0xa10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_OMAP3 | CLKF_INTERFACE,
+};
+
+static struct ti_clk icr_ick = {
+       .name = "icr_ick",
+       .clkdm_name = "core_l4_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &icr_ick_data,
+};
+
+static struct ti_clk_gate sgx_ick_data = {
+       .parent = "l3_ick",
+       .bit_shift = 0,
+       .reg = 0xb10,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_WAIT,
+};
+
+static struct ti_clk sgx_ick = {
+       .name = "sgx_ick",
+       .clkdm_name = "sgx_clkdm",
+       .type = TI_CLK_GATE,
+       .data = &sgx_ick_data,
+};
+
+static struct ti_clk_divider sys_clkout2_data = {
+       .parent = "clkout2_src_ck",
+       .bit_shift = 3,
+       .max_div = 64,
+       .reg = 0xd70,
+       .module = TI_CLKM_CM,
+       .flags = CLKF_INDEX_POWER_OF_TWO,
+};
+
+static struct ti_clk sys_clkout2 = {
+       .name = "sys_clkout2",
+       .type = TI_CLK_DIVIDER,
+       .data = &sys_clkout2_data,
+};
+
+static struct ti_clk_alias omap34xx_omap36xx_clks[] = {
+       CLK(NULL, "security_l4_ick2", &security_l4_ick2),
+       CLK(NULL, "aes1_ick", &aes1_ick),
+       CLK("omap_rng", "ick", &rng_ick),
+       CLK("omap3-rom-rng", "ick", &rng_ick),
+       CLK(NULL, "sha11_ick", &sha11_ick),
+       CLK(NULL, "des1_ick", &des1_ick),
+       CLK(NULL, "cam_mclk", &cam_mclk),
+       CLK(NULL, "cam_ick", &cam_ick),
+       CLK(NULL, "csi2_96m_fck", &csi2_96m_fck),
+       CLK(NULL, "security_l3_ick", &security_l3_ick),
+       CLK(NULL, "pka_ick", &pka_ick),
+       CLK(NULL, "icr_ick", &icr_ick),
+       CLK(NULL, "des2_ick", &des2_ick),
+       CLK(NULL, "mspro_ick", &mspro_ick),
+       CLK(NULL, "mailboxes_ick", &mailboxes_ick),
+       CLK(NULL, "ssi_l4_ick", &ssi_l4_ick),
+       CLK(NULL, "sr1_fck", &sr1_fck),
+       CLK(NULL, "sr2_fck", &sr2_fck),
+       CLK(NULL, "sr_l4_ick", &sr_l4_ick),
+       CLK(NULL, "dpll2_fck", &dpll2_fck),
+       CLK(NULL, "dpll2_ck", &dpll2_ck),
+       CLK(NULL, "dpll2_m2_ck", &dpll2_m2_ck),
+       CLK(NULL, "iva2_ck", &iva2_ck),
+       CLK(NULL, "modem_fck", &modem_fck),
+       CLK(NULL, "sad2d_ick", &sad2d_ick),
+       CLK(NULL, "mad2d_ick", &mad2d_ick),
+       CLK(NULL, "mspro_fck", &mspro_fck),
+       { NULL },
+};
+
+static struct ti_clk_alias omap36xx_omap3430es2plus_clks[] = {
+       CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es2),
+       CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es2),
+       CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es2),
+       CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es2),
+       CLK(NULL, "ssi_ick", &ssi_ick_3430es2),
+       CLK(NULL, "sys_d2_ck", &sys_d2_ck),
+       CLK(NULL, "omap_96m_d2_fck", &omap_96m_d2_fck),
+       CLK(NULL, "omap_96m_d4_fck", &omap_96m_d4_fck),
+       CLK(NULL, "omap_96m_d8_fck", &omap_96m_d8_fck),
+       CLK(NULL, "omap_96m_d10_fck", &omap_96m_d10_fck),
+       CLK(NULL, "dpll5_m2_d4_ck", &dpll5_m2_d4_ck),
+       CLK(NULL, "dpll5_m2_d8_ck", &dpll5_m2_d8_ck),
+       CLK(NULL, "dpll5_m2_d16_ck", &dpll5_m2_d16_ck),
+       CLK(NULL, "dpll5_m2_d20_ck", &dpll5_m2_d20_ck),
+       CLK(NULL, "usim_fck", &usim_fck),
+       CLK(NULL, "usim_ick", &usim_ick),
+       { NULL },
+};
+
+static struct ti_clk_alias omap3xxx_clks[] = {
+       CLK(NULL, "apb_pclk", &dummy_apb_pclk),
+       CLK(NULL, "omap_32k_fck", &omap_32k_fck),
+       CLK(NULL, "virt_12m_ck", &virt_12m_ck),
+       CLK(NULL, "virt_13m_ck", &virt_13m_ck),
+       CLK(NULL, "virt_19200000_ck", &virt_19200000_ck),
+       CLK(NULL, "virt_26000000_ck", &virt_26000000_ck),
+       CLK(NULL, "virt_38_4m_ck", &virt_38_4m_ck),
+       CLK(NULL, "virt_16_8m_ck", &virt_16_8m_ck),
+       CLK(NULL, "osc_sys_ck", &osc_sys_ck),
+       CLK("twl", "fck", &osc_sys_ck),
+       CLK(NULL, "sys_ck", &sys_ck),
+       CLK(NULL, "timer_sys_ck", &sys_ck),
+       CLK(NULL, "dpll4_ck", &dpll4_ck),
+       CLK(NULL, "dpll4_m2_ck", &dpll4_m2_ck),
+       CLK(NULL, "dpll4_m2x2_mul_ck", &dpll4_m2x2_mul_ck),
+       CLK(NULL, "dpll4_m2x2_ck", &dpll4_m2x2_ck),
+       CLK(NULL, "omap_96m_alwon_fck", &omap_96m_alwon_fck),
+       CLK(NULL, "dpll3_ck", &dpll3_ck),
+       CLK(NULL, "dpll3_m3_ck", &dpll3_m3_ck),
+       CLK(NULL, "dpll3_m3x2_mul_ck", &dpll3_m3x2_mul_ck),
+       CLK(NULL, "dpll3_m3x2_ck", &dpll3_m3x2_ck),
+       CLK("etb", "emu_core_alwon_ck", &emu_core_alwon_ck),
+       CLK(NULL, "sys_altclk", &sys_altclk),
+       CLK(NULL, "mcbsp_clks", &mcbsp_clks),
+       CLK(NULL, "sys_clkout1", &sys_clkout1),
+       CLK(NULL, "dpll3_m2_ck", &dpll3_m2_ck),
+       CLK(NULL, "core_ck", &core_ck),
+       CLK(NULL, "dpll1_fck", &dpll1_fck),
+       CLK(NULL, "dpll1_ck", &dpll1_ck),
+       CLK(NULL, "cpufreq_ck", &dpll1_ck),
+       CLK(NULL, "dpll1_x2_ck", &dpll1_x2_ck),
+       CLK(NULL, "dpll1_x2m2_ck", &dpll1_x2m2_ck),
+       CLK(NULL, "dpll3_x2_ck", &dpll3_x2_ck),
+       CLK(NULL, "dpll3_m2x2_ck", &dpll3_m2x2_ck),
+       CLK(NULL, "dpll4_x2_ck", &dpll4_x2_ck),
+       CLK(NULL, "cm_96m_fck", &cm_96m_fck),
+       CLK(NULL, "omap_96m_fck", &omap_96m_fck),
+       CLK(NULL, "dpll4_m3_ck", &dpll4_m3_ck),
+       CLK(NULL, "dpll4_m3x2_mul_ck", &dpll4_m3x2_mul_ck),
+       CLK(NULL, "dpll4_m3x2_ck", &dpll4_m3x2_ck),
+       CLK(NULL, "omap_54m_fck", &omap_54m_fck),
+       CLK(NULL, "cm_96m_d2_fck", &cm_96m_d2_fck),
+       CLK(NULL, "omap_48m_fck", &omap_48m_fck),
+       CLK(NULL, "omap_12m_fck", &omap_12m_fck),
+       CLK(NULL, "dpll4_m4_ck", &dpll4_m4_ck),
+       CLK(NULL, "dpll4_m4x2_mul_ck", &dpll4_m4x2_mul_ck),
+       CLK(NULL, "dpll4_m4x2_ck", &dpll4_m4x2_ck),
+       CLK(NULL, "dpll4_m5_ck", &dpll4_m5_ck),
+       CLK(NULL, "dpll4_m5x2_mul_ck", &dpll4_m5x2_mul_ck),
+       CLK(NULL, "dpll4_m5x2_ck", &dpll4_m5x2_ck),
+       CLK(NULL, "dpll4_m6_ck", &dpll4_m6_ck),
+       CLK(NULL, "dpll4_m6x2_mul_ck", &dpll4_m6x2_mul_ck),
+       CLK(NULL, "dpll4_m6x2_ck", &dpll4_m6x2_ck),
+       CLK("etb", "emu_per_alwon_ck", &emu_per_alwon_ck),
+       CLK(NULL, "clkout2_src_ck", &clkout2_src_ck),
+       CLK(NULL, "sys_clkout2", &sys_clkout2),
+       CLK(NULL, "corex2_fck", &corex2_fck),
+       CLK(NULL, "mpu_ck", &mpu_ck),
+       CLK(NULL, "arm_fck", &arm_fck),
+       CLK("etb", "emu_mpu_alwon_ck", &emu_mpu_alwon_ck),
+       CLK(NULL, "l3_ick", &l3_ick),
+       CLK(NULL, "l4_ick", &l4_ick),
+       CLK(NULL, "rm_ick", &rm_ick),
+       CLK(NULL, "timer_32k_ck", &omap_32k_fck),
+       CLK(NULL, "gpt10_fck", &gpt10_fck),
+       CLK(NULL, "gpt11_fck", &gpt11_fck),
+       CLK(NULL, "core_96m_fck", &core_96m_fck),
+       CLK(NULL, "mmchs2_fck", &mmchs2_fck),
+       CLK(NULL, "mmchs1_fck", &mmchs1_fck),
+       CLK(NULL, "i2c3_fck", &i2c3_fck),
+       CLK(NULL, "i2c2_fck", &i2c2_fck),
+       CLK(NULL, "i2c1_fck", &i2c1_fck),
+       CLK(NULL, "mcbsp5_fck", &mcbsp5_fck),
+       CLK(NULL, "mcbsp1_fck", &mcbsp1_fck),
+       CLK(NULL, "core_48m_fck", &core_48m_fck),
+       CLK(NULL, "mcspi4_fck", &mcspi4_fck),
+       CLK(NULL, "mcspi3_fck", &mcspi3_fck),
+       CLK(NULL, "mcspi2_fck", &mcspi2_fck),
+       CLK(NULL, "mcspi1_fck", &mcspi1_fck),
+       CLK(NULL, "uart2_fck", &uart2_fck),
+       CLK(NULL, "uart1_fck", &uart1_fck),
+       CLK(NULL, "core_12m_fck", &core_12m_fck),
+       CLK("omap_hdq.0", "fck", &hdq_fck),
+       CLK(NULL, "hdq_fck", &hdq_fck),
+       CLK(NULL, "core_l3_ick", &core_l3_ick),
+       CLK(NULL, "sdrc_ick", &sdrc_ick),
+       CLK(NULL, "gpmc_fck", &gpmc_fck),
+       CLK(NULL, "core_l4_ick", &core_l4_ick),
+       CLK("omap_hsmmc.1", "ick", &mmchs2_ick),
+       CLK("omap_hsmmc.0", "ick", &mmchs1_ick),
+       CLK(NULL, "mmchs2_ick", &mmchs2_ick),
+       CLK(NULL, "mmchs1_ick", &mmchs1_ick),
+       CLK("omap_hdq.0", "ick", &hdq_ick),
+       CLK(NULL, "hdq_ick", &hdq_ick),
+       CLK("omap2_mcspi.4", "ick", &mcspi4_ick),
+       CLK("omap2_mcspi.3", "ick", &mcspi3_ick),
+       CLK("omap2_mcspi.2", "ick", &mcspi2_ick),
+       CLK("omap2_mcspi.1", "ick", &mcspi1_ick),
+       CLK(NULL, "mcspi4_ick", &mcspi4_ick),
+       CLK(NULL, "mcspi3_ick", &mcspi3_ick),
+       CLK(NULL, "mcspi2_ick", &mcspi2_ick),
+       CLK(NULL, "mcspi1_ick", &mcspi1_ick),
+       CLK("omap_i2c.3", "ick", &i2c3_ick),
+       CLK("omap_i2c.2", "ick", &i2c2_ick),
+       CLK("omap_i2c.1", "ick", &i2c1_ick),
+       CLK(NULL, "i2c3_ick", &i2c3_ick),
+       CLK(NULL, "i2c2_ick", &i2c2_ick),
+       CLK(NULL, "i2c1_ick", &i2c1_ick),
+       CLK(NULL, "uart2_ick", &uart2_ick),
+       CLK(NULL, "uart1_ick", &uart1_ick),
+       CLK(NULL, "gpt11_ick", &gpt11_ick),
+       CLK(NULL, "gpt10_ick", &gpt10_ick),
+       CLK("omap-mcbsp.5", "ick", &mcbsp5_ick),
+       CLK("omap-mcbsp.1", "ick", &mcbsp1_ick),
+       CLK(NULL, "mcbsp5_ick", &mcbsp5_ick),
+       CLK(NULL, "mcbsp1_ick", &mcbsp1_ick),
+       CLK(NULL, "omapctrl_ick", &omapctrl_ick),
+       CLK(NULL, "dss_tv_fck", &dss_tv_fck),
+       CLK(NULL, "dss_96m_fck", &dss_96m_fck),
+       CLK(NULL, "dss2_alwon_fck", &dss2_alwon_fck),
+       CLK(NULL, "init_60m_fclk", &dummy_ck),
+       CLK(NULL, "gpt1_fck", &gpt1_fck),
+       CLK(NULL, "aes2_ick", &aes2_ick),
+       CLK(NULL, "wkup_32k_fck", &wkup_32k_fck),
+       CLK(NULL, "gpio1_dbck", &gpio1_dbck),
+       CLK(NULL, "sha12_ick", &sha12_ick),
+       CLK(NULL, "wdt2_fck", &wdt2_fck),
+       CLK(NULL, "wkup_l4_ick", &wkup_l4_ick),
+       CLK("omap_wdt", "ick", &wdt2_ick),
+       CLK(NULL, "wdt2_ick", &wdt2_ick),
+       CLK(NULL, "wdt1_ick", &wdt1_ick),
+       CLK(NULL, "gpio1_ick", &gpio1_ick),
+       CLK(NULL, "omap_32ksync_ick", &omap_32ksync_ick),
+       CLK(NULL, "gpt12_ick", &gpt12_ick),
+       CLK(NULL, "gpt1_ick", &gpt1_ick),
+       CLK(NULL, "per_96m_fck", &per_96m_fck),
+       CLK(NULL, "per_48m_fck", &per_48m_fck),
+       CLK(NULL, "uart3_fck", &uart3_fck),
+       CLK(NULL, "gpt2_fck", &gpt2_fck),
+       CLK(NULL, "gpt3_fck", &gpt3_fck),
+       CLK(NULL, "gpt4_fck", &gpt4_fck),
+       CLK(NULL, "gpt5_fck", &gpt5_fck),
+       CLK(NULL, "gpt6_fck", &gpt6_fck),
+       CLK(NULL, "gpt7_fck", &gpt7_fck),
+       CLK(NULL, "gpt8_fck", &gpt8_fck),
+       CLK(NULL, "gpt9_fck", &gpt9_fck),
+       CLK(NULL, "per_32k_alwon_fck", &per_32k_alwon_fck),
+       CLK(NULL, "gpio6_dbck", &gpio6_dbck),
+       CLK(NULL, "gpio5_dbck", &gpio5_dbck),
+       CLK(NULL, "gpio4_dbck", &gpio4_dbck),
+       CLK(NULL, "gpio3_dbck", &gpio3_dbck),
+       CLK(NULL, "gpio2_dbck", &gpio2_dbck),
+       CLK(NULL, "wdt3_fck", &wdt3_fck),
+       CLK(NULL, "per_l4_ick", &per_l4_ick),
+       CLK(NULL, "gpio6_ick", &gpio6_ick),
+       CLK(NULL, "gpio5_ick", &gpio5_ick),
+       CLK(NULL, "gpio4_ick", &gpio4_ick),
+       CLK(NULL, "gpio3_ick", &gpio3_ick),
+       CLK(NULL, "gpio2_ick", &gpio2_ick),
+       CLK(NULL, "wdt3_ick", &wdt3_ick),
+       CLK(NULL, "uart3_ick", &uart3_ick),
+       CLK(NULL, "uart4_ick", &uart4_ick),
+       CLK(NULL, "gpt9_ick", &gpt9_ick),
+       CLK(NULL, "gpt8_ick", &gpt8_ick),
+       CLK(NULL, "gpt7_ick", &gpt7_ick),
+       CLK(NULL, "gpt6_ick", &gpt6_ick),
+       CLK(NULL, "gpt5_ick", &gpt5_ick),
+       CLK(NULL, "gpt4_ick", &gpt4_ick),
+       CLK(NULL, "gpt3_ick", &gpt3_ick),
+       CLK(NULL, "gpt2_ick", &gpt2_ick),
+       CLK("omap-mcbsp.2", "ick", &mcbsp2_ick),
+       CLK("omap-mcbsp.3", "ick", &mcbsp3_ick),
+       CLK("omap-mcbsp.4", "ick", &mcbsp4_ick),
+       CLK(NULL, "mcbsp4_ick", &mcbsp2_ick),
+       CLK(NULL, "mcbsp3_ick", &mcbsp3_ick),
+       CLK(NULL, "mcbsp2_ick", &mcbsp4_ick),
+       CLK(NULL, "mcbsp2_fck", &mcbsp2_fck),
+       CLK(NULL, "mcbsp3_fck", &mcbsp3_fck),
+       CLK(NULL, "mcbsp4_fck", &mcbsp4_fck),
+       CLK(NULL, "emu_src_mux_ck", &emu_src_mux_ck),
+       CLK("etb", "emu_src_ck", &emu_src_ck),
+       CLK(NULL, "emu_src_mux_ck", &emu_src_mux_ck),
+       CLK(NULL, "emu_src_ck", &emu_src_ck),
+       CLK(NULL, "pclk_fck", &pclk_fck),
+       CLK(NULL, "pclkx2_fck", &pclkx2_fck),
+       CLK(NULL, "atclk_fck", &atclk_fck),
+       CLK(NULL, "traceclk_src_fck", &traceclk_src_fck),
+       CLK(NULL, "traceclk_fck", &traceclk_fck),
+       CLK(NULL, "secure_32k_fck", &secure_32k_fck),
+       CLK(NULL, "gpt12_fck", &gpt12_fck),
+       CLK(NULL, "wdt1_fck", &wdt1_fck),
+       { NULL },
+};
+
+static struct ti_clk_alias omap36xx_am35xx_omap3430es2plus_clks[] = {
+       CLK(NULL, "dpll5_ck", &dpll5_ck),
+       CLK(NULL, "dpll5_m2_ck", &dpll5_m2_ck),
+       CLK(NULL, "core_d3_ck", &core_d3_ck),
+       CLK(NULL, "core_d4_ck", &core_d4_ck),
+       CLK(NULL, "core_d6_ck", &core_d6_ck),
+       CLK(NULL, "omap_192m_alwon_fck", &omap_192m_alwon_fck),
+       CLK(NULL, "core_d2_ck", &core_d2_ck),
+       CLK(NULL, "corex2_d3_fck", &corex2_d3_fck),
+       CLK(NULL, "corex2_d5_fck", &corex2_d5_fck),
+       CLK(NULL, "sgx_fck", &sgx_fck),
+       CLK(NULL, "sgx_ick", &sgx_ick),
+       CLK(NULL, "cpefuse_fck", &cpefuse_fck),
+       CLK(NULL, "ts_fck", &ts_fck),
+       CLK(NULL, "usbtll_fck", &usbtll_fck),
+       CLK(NULL, "usbtll_ick", &usbtll_ick),
+       CLK("omap_hsmmc.2", "ick", &mmchs3_ick),
+       CLK(NULL, "mmchs3_ick", &mmchs3_ick),
+       CLK(NULL, "mmchs3_fck", &mmchs3_fck),
+       CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es2),
+       CLK("omapdss_dss", "ick", &dss_ick_3430es2),
+       CLK(NULL, "dss_ick", &dss_ick_3430es2),
+       CLK(NULL, "usbhost_120m_fck", &usbhost_120m_fck),
+       CLK(NULL, "usbhost_48m_fck", &usbhost_48m_fck),
+       CLK(NULL, "usbhost_ick", &usbhost_ick),
+       { NULL },
+};
+
+static struct ti_clk_alias omap3430es1_clks[] = {
+       CLK(NULL, "gfx_l3_ck", &gfx_l3_ck),
+       CLK(NULL, "gfx_l3_fck", &gfx_l3_fck),
+       CLK(NULL, "gfx_l3_ick", &gfx_l3_ick),
+       CLK(NULL, "gfx_cg1_ck", &gfx_cg1_ck),
+       CLK(NULL, "gfx_cg2_ck", &gfx_cg2_ck),
+       CLK(NULL, "d2d_26m_fck", &d2d_26m_fck),
+       CLK(NULL, "fshostusb_fck", &fshostusb_fck),
+       CLK(NULL, "ssi_ssr_fck", &ssi_ssr_fck_3430es1),
+       CLK(NULL, "ssi_sst_fck", &ssi_sst_fck_3430es1),
+       CLK("musb-omap2430", "ick", &hsotgusb_ick_3430es1),
+       CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_3430es1),
+       CLK(NULL, "fac_ick", &fac_ick),
+       CLK(NULL, "ssi_ick", &ssi_ick_3430es1),
+       CLK(NULL, "usb_l4_ick", &usb_l4_ick),
+       CLK(NULL, "dss1_alwon_fck", &dss1_alwon_fck_3430es1),
+       CLK("omapdss_dss", "ick", &dss_ick_3430es1),
+       CLK(NULL, "dss_ick", &dss_ick_3430es1),
+       { NULL },
+};
+
+static struct ti_clk_alias omap36xx_clks[] = {
+       CLK(NULL, "uart4_fck", &uart4_fck),
+       { NULL },
+};
+
+static struct ti_clk_alias am35xx_clks[] = {
+       CLK(NULL, "ipss_ick", &ipss_ick),
+       CLK(NULL, "rmii_ck", &rmii_ck),
+       CLK(NULL, "pclk_ck", &pclk_ck),
+       CLK(NULL, "emac_ick", &emac_ick),
+       CLK(NULL, "emac_fck", &emac_fck),
+       CLK("davinci_emac.0", NULL, &emac_ick),
+       CLK("davinci_mdio.0", NULL, &emac_fck),
+       CLK("vpfe-capture", "master", &vpfe_ick),
+       CLK("vpfe-capture", "slave", &vpfe_fck),
+       CLK(NULL, "hsotgusb_ick", &hsotgusb_ick_am35xx),
+       CLK(NULL, "hsotgusb_fck", &hsotgusb_fck_am35xx),
+       CLK(NULL, "hecc_ck", &hecc_ck),
+       CLK(NULL, "uart4_ick", &uart4_ick_am35xx),
+       CLK(NULL, "uart4_fck", &uart4_fck_am35xx),
+       { NULL },
+};
+
+static struct ti_clk *omap36xx_clk_patches[] = {
+       &dpll4_m3x2_ck_omap36xx,
+       &dpll3_m3x2_ck_omap36xx,
+       &dpll4_m6x2_ck_omap36xx,
+       &dpll4_m2x2_ck_omap36xx,
+       &dpll4_m5x2_ck_omap36xx,
+       &dpll4_ck_omap36xx,
+       NULL,
+};
+
+static const char *enable_init_clks[] = {
+       "sdrc_ick",
+       "gpmc_fck",
+       "omapctrl_ick",
+};
+
+static void __init omap3_clk_legacy_common_init(void)
+{
+       omap2_clk_disable_autoidle_all();
+
+       omap2_clk_enable_init_clocks(enable_init_clks,
+                                    ARRAY_SIZE(enable_init_clks));
+
+       pr_info("Clocking rate (Crystal/Core/MPU): %ld.%01ld/%ld/%ld MHz\n",
+               (clk_get_rate(osc_sys_ck.clk) / 1000000),
+               (clk_get_rate(osc_sys_ck.clk) / 100000) % 10,
+               (clk_get_rate(core_ck.clk) / 1000000),
+               (clk_get_rate(arm_fck.clk) / 1000000));
+}
+
+int __init omap3430es1_clk_legacy_init(void)
+{
+       int r;
+
+       r = ti_clk_register_legacy_clks(omap3430es1_clks);
+       r |= ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
+       r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+       omap3_clk_legacy_common_init();
+
+       return r;
+}
+
+int __init omap3430_clk_legacy_init(void)
+{
+       int r;
+
+       r = ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
+       r |= ti_clk_register_legacy_clks(omap36xx_omap3430es2plus_clks);
+       r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
+       r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+       omap3_clk_legacy_common_init();
+       omap3_clk_lock_dpll5();
+
+       return r;
+}
+
+int __init omap36xx_clk_legacy_init(void)
+{
+       int r;
+
+       ti_clk_patch_legacy_clks(omap36xx_clk_patches);
+       r = ti_clk_register_legacy_clks(omap36xx_clks);
+       r |= ti_clk_register_legacy_clks(omap36xx_omap3430es2plus_clks);
+       r |= ti_clk_register_legacy_clks(omap34xx_omap36xx_clks);
+       r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
+       r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+       omap3_clk_legacy_common_init();
+       omap3_clk_lock_dpll5();
+
+       return r;
+}
+
+int __init am35xx_clk_legacy_init(void)
+{
+       int r;
+
+       r = ti_clk_register_legacy_clks(am35xx_clks);
+       r |= ti_clk_register_legacy_clks(omap36xx_am35xx_omap3430es2plus_clks);
+       r |= ti_clk_register_legacy_clks(omap3xxx_clks);
+
+       omap3_clk_legacy_common_init();
+       omap3_clk_lock_dpll5();
+
+       return r;
+}
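The per-SoC init functions above also show how the 36xx-only data is applied: each entry in omap36xx_clk_patches[] carries a .patch pointer back at the base clock, and ti_clk_patch_legacy_clks() (added to drivers/clk/ti/clk.c further down in this diff) memcpy()s the variant definition over that base before anything is registered. A minimal sketch of the same pattern, with a made-up "foo_ck" standing in for the dpll4_m*x2 clocks that are actually patched:

/*
 * Illustrative sketch only; "foo_ck" and its data are hypothetical.
 * The real patch table replaces the dpll3_m3x2/dpll4_m*x2 definitions.
 */
static struct ti_clk_fixed_factor foo_ck_data = {
	.parent = "sys_ck",
	.div = 1,
	.mult = 2,
};

static struct ti_clk foo_ck = {
	.name = "foo_ck",
	.type = TI_CLK_FIXED_FACTOR,
	.data = &foo_ck_data,
};

static struct ti_clk_gate foo_ck_36xx_data = {
	.parent = "sys_ck",
	.bit_shift = 28,
	.reg = 0xd00,
	.module = TI_CLKM_CM,
	.flags = CLKF_HSDIV | CLKF_SET_BIT_TO_DISABLE,
};

static struct ti_clk foo_ck_36xx = {
	.name = "foo_ck",
	.type = TI_CLK_GATE,
	.data = &foo_ck_36xx_data,
	.patch = &foo_ck,	/* base definition to overwrite on 36xx */
};

static struct ti_clk *foo_patches[] = {
	&foo_ck_36xx,
	NULL,
};

/*
 * Calling ti_clk_patch_legacy_clks(foo_patches) before the alias
 * tables are registered copies *foo_ck_36xx over *foo_ck, so every
 * table entry pointing at &foo_ck now registers the gate variant.
 */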
index 0d1750a8aea40db16a470c697418d1dbd8b6bb45..383a06e49b09db95e465260369fe307afebb92e2 100644 (file)
@@ -327,7 +327,6 @@ enum {
        OMAP3_SOC_OMAP3430_ES1,
        OMAP3_SOC_OMAP3430_ES2_PLUS,
        OMAP3_SOC_OMAP3630,
-       OMAP3_SOC_TI81XX,
 };
 
 static int __init omap3xxx_dt_clk_init(int soc_type)
@@ -370,7 +369,7 @@ static int __init omap3xxx_dt_clk_init(int soc_type)
                (clk_get_rate(clk_get_sys(NULL, "core_ck")) / 1000000),
                (clk_get_rate(clk_get_sys(NULL, "arm_fck")) / 1000000));
 
-       if (soc_type != OMAP3_SOC_TI81XX && soc_type != OMAP3_SOC_OMAP3430_ES1)
+       if (soc_type != OMAP3_SOC_OMAP3430_ES1)
                omap3_clk_lock_dpll5();
 
        return 0;
@@ -390,8 +389,3 @@ int __init am35xx_dt_clk_init(void)
 {
        return omap3xxx_dt_clk_init(OMAP3_SOC_AM35XX);
 }
-
-int __init ti81xx_dt_clk_init(void)
-{
-       return omap3xxx_dt_clk_init(OMAP3_SOC_TI81XX);
-}
index 02517a8206bda8eda55ef32aac17f8163dc2d064..4f4c87751db521d0ce05dcd82e2a62e81408f9d9 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 
index 5e183993e3ec56b926fffd252325000d02566d03..14160b2235480f1b2855a5c1f3d16865321ce18a 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/io.h>
 #include <linux/clk/ti.h>
index 62ac8f6e480c61abf1760aeec2ebc980fd716cc3..ee32f4deebf40280be8e4d4e6e454e815c53cd67 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/kernel.h>
 #include <linux/list.h>
-#include <linux/clk-private.h>
+#include <linux/clk.h>
 #include <linux/clkdev.h>
 #include <linux/clk/ti.h>
 
diff --git a/drivers/clk/ti/clk-816x.c b/drivers/clk/ti/clk-816x.c
new file mode 100644 (file)
index 0000000..9451e65
--- /dev/null
@@ -0,0 +1,53 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/clk-provider.h>
+#include <linux/clk/ti.h>
+
+static struct ti_dt_clk dm816x_clks[] = {
+       DT_CLK(NULL, "sys_clkin", "sys_clkin_ck"),
+       DT_CLK(NULL, "timer_sys_ck", "sys_clkin_ck"),
+       DT_CLK(NULL, "sys_32k_ck", "sys_32k_ck"),
+       DT_CLK(NULL, "mpu_ck", "mpu_ck"),
+       DT_CLK(NULL, "timer1_fck", "timer1_fck"),
+       DT_CLK(NULL, "timer2_fck", "timer2_fck"),
+       DT_CLK(NULL, "timer3_fck", "timer3_fck"),
+       DT_CLK(NULL, "timer4_fck", "timer4_fck"),
+       DT_CLK(NULL, "timer5_fck", "timer5_fck"),
+       DT_CLK(NULL, "timer6_fck", "timer6_fck"),
+       DT_CLK(NULL, "timer7_fck", "timer7_fck"),
+       DT_CLK(NULL, "sysclk4_ck", "sysclk4_ck"),
+       DT_CLK(NULL, "sysclk5_ck", "sysclk5_ck"),
+       DT_CLK(NULL, "sysclk6_ck", "sysclk6_ck"),
+       DT_CLK(NULL, "sysclk10_ck", "sysclk10_ck"),
+       DT_CLK(NULL, "sysclk18_ck", "sysclk18_ck"),
+       DT_CLK(NULL, "sysclk24_ck", "sysclk24_ck"),
+       DT_CLK("4a100000.ethernet", "sysclk24_ck", "sysclk24_ck"),
+       { .node_name = NULL },
+};
+
+static const char *enable_init_clks[] = {
+       "ddr_pll_clk1",
+       "ddr_pll_clk2",
+       "ddr_pll_clk3",
+};
+
+int __init ti81xx_dt_clk_init(void)
+{
+       ti_dt_clocks_register(dm816x_clks);
+       omap2_clk_disable_autoidle_all();
+       omap2_clk_enable_init_clocks(enable_init_clks,
+                                    ARRAY_SIZE(enable_init_clks));
+
+       return 0;
+}
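The dm816x_clks[] table only adds clkdev aliases for clocks already created from the device tree, so consumers resolve them through the normal clk API. A small sketch of a driver picking up the "sysclk24_ck" alias registered above (the probe helper and its error handling are illustrative, not part of this patch; entries with a NULL dev_id match any device by con_id alone):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch: consume the "sysclk24_ck" alias from a driver's probe path. */
static int example_enable_sysclk24(struct device *dev)
{
	struct clk *clk;
	int ret;

	clk = devm_clk_get(dev, "sysclk24_ck");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	dev_info(dev, "sysclk24 running at %lu Hz\n", clk_get_rate(clk));

	return 0;
}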
index 337abe5909e1f272cdcdca85e5ae0fb3fe1f4729..e22b95646e09a8357e5adb3786f726c8e6baab10 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/of_address.h>
 #include <linux/list.h>
 
+#include "clock.h"
+
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
@@ -183,3 +185,128 @@ void ti_dt_clk_init_retry_clks(void)
                retries--;
        }
 }
+
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+void __init ti_clk_patch_legacy_clks(struct ti_clk **patch)
+{
+       while (*patch) {
+               memcpy((*patch)->patch, *patch, sizeof(**patch));
+               patch++;
+       }
+}
+
+struct clk __init *ti_clk_register_clk(struct ti_clk *setup)
+{
+       struct clk *clk;
+       struct ti_clk_fixed *fixed;
+       struct ti_clk_fixed_factor *fixed_factor;
+       struct clk_hw *clk_hw;
+
+       if (setup->clk)
+               return setup->clk;
+
+       switch (setup->type) {
+       case TI_CLK_FIXED:
+               fixed = setup->data;
+
+               clk = clk_register_fixed_rate(NULL, setup->name, NULL,
+                                             CLK_IS_ROOT, fixed->frequency);
+               break;
+       case TI_CLK_MUX:
+               clk = ti_clk_register_mux(setup);
+               break;
+       case TI_CLK_DIVIDER:
+               clk = ti_clk_register_divider(setup);
+               break;
+       case TI_CLK_COMPOSITE:
+               clk = ti_clk_register_composite(setup);
+               break;
+       case TI_CLK_FIXED_FACTOR:
+               fixed_factor = setup->data;
+
+               clk = clk_register_fixed_factor(NULL, setup->name,
+                                               fixed_factor->parent,
+                                               0, fixed_factor->mult,
+                                               fixed_factor->div);
+               break;
+       case TI_CLK_GATE:
+               clk = ti_clk_register_gate(setup);
+               break;
+       case TI_CLK_DPLL:
+               clk = ti_clk_register_dpll(setup);
+               break;
+       default:
+               pr_err("bad type for %s!\n", setup->name);
+               clk = ERR_PTR(-EINVAL);
+       }
+
+       if (!IS_ERR(clk)) {
+               setup->clk = clk;
+               if (setup->clkdm_name) {
+                       if (__clk_get_flags(clk) & CLK_IS_BASIC) {
+                               pr_warn("can't setup clkdm for basic clk %s\n",
+                                       setup->name);
+                       } else {
+                               clk_hw = __clk_get_hw(clk);
+                               to_clk_hw_omap(clk_hw)->clkdm_name =
+                                       setup->clkdm_name;
+                               omap2_init_clk_clkdm(clk_hw);
+                       }
+               }
+       }
+
+       return clk;
+}
+
+int __init ti_clk_register_legacy_clks(struct ti_clk_alias *clks)
+{
+       struct clk *clk;
+       bool retry;
+       struct ti_clk_alias *retry_clk;
+       struct ti_clk_alias *tmp;
+
+       while (clks->clk) {
+               clk = ti_clk_register_clk(clks->clk);
+               if (IS_ERR(clk)) {
+                       if (PTR_ERR(clk) == -EAGAIN) {
+                               list_add(&clks->link, &retry_list);
+                       } else {
+                               pr_err("register for %s failed: %ld\n",
+                                      clks->clk->name, PTR_ERR(clk));
+                               return PTR_ERR(clk);
+                       }
+               } else {
+                       clks->lk.clk = clk;
+                       clkdev_add(&clks->lk);
+               }
+               clks++;
+       }
+
+       retry = true;
+
+       while (!list_empty(&retry_list) && retry) {
+               retry = false;
+               list_for_each_entry_safe(retry_clk, tmp, &retry_list, link) {
+                       pr_debug("retry-init: %s\n", retry_clk->clk->name);
+                       clk = ti_clk_register_clk(retry_clk->clk);
+                       if (IS_ERR(clk)) {
+                               if (PTR_ERR(clk) == -EAGAIN) {
+                                       continue;
+                               } else {
+                                       pr_err("register for %s failed: %ld\n",
+                                              retry_clk->clk->name,
+                                              PTR_ERR(clk));
+                                       return PTR_ERR(clk);
+                               }
+                       } else {
+                               retry = true;
+                               retry_clk->lk.clk = clk;
+                               clkdev_add(&retry_clk->lk);
+                               list_del(&retry_clk->link);
+                       }
+               }
+       }
+
+       return 0;
+}
+#endif
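ti_clk_register_legacy_clks() defers any clock whose registration fails with -EAGAIN (typically a parent such as a DPLL reference that has not been registered yet) onto retry_list, then keeps re-walking that list as long as a pass still makes progress. A stand-alone sketch of the same fixed-point idea, with illustrative types rather than the kernel structures:

/*
 * Stand-alone model of the -EAGAIN retry loop above; "struct item"
 * and try_register() are illustrative, not the kernel API.
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

struct item {
	struct item *parent;	/* must be registered first, or NULL */
	bool done;
};

static int try_register(struct item *it)
{
	if (it->parent && !it->parent->done)
		return -EAGAIN;	/* dependency missing: defer */
	it->done = true;
	return 0;
}

static void register_all(struct item **items, size_t n)
{
	bool progress = true;
	size_t i;

	/*
	 * First pass plus fixed-point retries: keep re-walking the
	 * pending items while at least one of them succeeds, mirroring
	 * the retry_list handling in ti_clk_register_legacy_clks().
	 */
	while (progress) {
		progress = false;
		for (i = 0; i < n; i++) {
			if (items[i]->done)
				continue;
			if (try_register(items[i]) == 0)
				progress = true;
		}
	}
}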
diff --git a/drivers/clk/ti/clock.h b/drivers/clk/ti/clock.h
new file mode 100644 (file)
index 0000000..404158d
--- /dev/null
@@ -0,0 +1,172 @@
+/*
+ * TI Clock driver internal definitions
+ *
+ * Copyright (C) 2014 Texas Instruments, Inc
+ *     Tero Kristo (t-kristo@ti.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef __DRIVERS_CLK_TI_CLOCK__
+#define __DRIVERS_CLK_TI_CLOCK__
+
+enum {
+       TI_CLK_FIXED,
+       TI_CLK_MUX,
+       TI_CLK_DIVIDER,
+       TI_CLK_COMPOSITE,
+       TI_CLK_FIXED_FACTOR,
+       TI_CLK_GATE,
+       TI_CLK_DPLL,
+};
+
+/* Global flags */
+#define CLKF_INDEX_POWER_OF_TWO                (1 << 0)
+#define CLKF_INDEX_STARTS_AT_ONE       (1 << 1)
+#define CLKF_SET_RATE_PARENT           (1 << 2)
+#define CLKF_OMAP3                     (1 << 3)
+#define CLKF_AM35XX                    (1 << 4)
+
+/* Gate flags */
+#define CLKF_SET_BIT_TO_DISABLE                (1 << 5)
+#define CLKF_INTERFACE                 (1 << 6)
+#define CLKF_SSI                       (1 << 7)
+#define CLKF_DSS                       (1 << 8)
+#define CLKF_HSOTGUSB                  (1 << 9)
+#define CLKF_WAIT                      (1 << 10)
+#define CLKF_NO_WAIT                   (1 << 11)
+#define CLKF_HSDIV                     (1 << 12)
+#define CLKF_CLKDM                     (1 << 13)
+
+/* DPLL flags */
+#define CLKF_LOW_POWER_STOP            (1 << 5)
+#define CLKF_LOCK                      (1 << 6)
+#define CLKF_LOW_POWER_BYPASS          (1 << 7)
+#define CLKF_PER                       (1 << 8)
+#define CLKF_CORE                      (1 << 9)
+#define CLKF_J_TYPE                    (1 << 10)
+
+#define CLK(dev, con, ck)              \
+       {                               \
+               .lk = {                 \
+                       .dev_id = dev,  \
+                       .con_id = con,  \
+               },                      \
+               .clk = ck,              \
+       }
+
+struct ti_clk {
+       const char *name;
+       const char *clkdm_name;
+       int type;
+       void *data;
+       struct ti_clk *patch;
+       struct clk *clk;
+};
+
+struct ti_clk_alias {
+       struct ti_clk *clk;
+       struct clk_lookup lk;
+       struct list_head link;
+};
+
+struct ti_clk_fixed {
+       u32 frequency;
+       u16 flags;
+};
+
+struct ti_clk_mux {
+       u8 bit_shift;
+       int num_parents;
+       u16 reg;
+       u8 module;
+       const char **parents;
+       u16 flags;
+};
+
+struct ti_clk_divider {
+       const char *parent;
+       u8 bit_shift;
+       u16 max_div;
+       u16 reg;
+       u8 module;
+       int *dividers;
+       int num_dividers;
+       u16 flags;
+};
+
+struct ti_clk_fixed_factor {
+       const char *parent;
+       u16 div;
+       u16 mult;
+       u16 flags;
+};
+
+struct ti_clk_gate {
+       const char *parent;
+       u8 bit_shift;
+       u16 reg;
+       u8 module;
+       u16 flags;
+};
+
+struct ti_clk_composite {
+       struct ti_clk_divider *divider;
+       struct ti_clk_mux *mux;
+       struct ti_clk_gate *gate;
+       u16 flags;
+};
+
+struct ti_clk_clkdm_gate {
+       const char *parent;
+       u16 flags;
+};
+
+struct ti_clk_dpll {
+       int num_parents;
+       u16 control_reg;
+       u16 idlest_reg;
+       u16 autoidle_reg;
+       u16 mult_div1_reg;
+       u8 module;
+       const char **parents;
+       u16 flags;
+       u8 modes;
+       u32 mult_mask;
+       u32 div1_mask;
+       u32 enable_mask;
+       u32 autoidle_mask;
+       u32 freqsel_mask;
+       u32 idlest_mask;
+       u32 dco_mask;
+       u32 sddiv_mask;
+       u16 max_multiplier;
+       u16 max_divider;
+       u8 min_divider;
+       u8 auto_recal_bit;
+       u8 recal_en_bit;
+       u8 recal_st_bit;
+};
+
+struct clk *ti_clk_register_gate(struct ti_clk *setup);
+struct clk *ti_clk_register_interface(struct ti_clk *setup);
+struct clk *ti_clk_register_mux(struct ti_clk *setup);
+struct clk *ti_clk_register_divider(struct ti_clk *setup);
+struct clk *ti_clk_register_composite(struct ti_clk *setup);
+struct clk *ti_clk_register_dpll(struct ti_clk *setup);
+
+struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup);
+struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup);
+struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup);
+
+void ti_clk_patch_legacy_clks(struct ti_clk **patch);
+struct clk *ti_clk_register_clk(struct ti_clk *setup);
+int ti_clk_register_legacy_clks(struct ti_clk_alias *clks);
+
+#endif
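For reference, CLK() only builds a struct ti_clk_alias with an embedded clkdev lookup, so each table line in clk-3xxx-legacy.c expands to an initializer like the sketch below (the example_alias name is made up; the entry shown is the real mmchs1_ick one):

/*
 * Rough expansion of one table line:
 *   CLK("omap_hsmmc.0", "ick", &mmchs1_ick)
 */
static struct ti_clk_alias example_alias = {
	.lk = {
		.dev_id = "omap_hsmmc.0",	/* matched against the device name */
		.con_id = "ick",		/* matched against the clk_get() con_id */
	},
	.clk = &mmchs1_ick,
};

/*
 * ti_clk_register_legacy_clks() later sets .lk.clk to the struct clk
 * returned by ti_clk_register_clk() and calls clkdev_add() on the
 * lookup, which is what makes clk_get(dev, "ick") resolve for the
 * omap_hsmmc.0 device; entries with a NULL dev_id match by con_id only.
 */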
index 19d8980ba458ef4e37f69cd38ba9fe6093d3ecfb..3654f61912ebb7d099771f5f5e7fba9a5bbc4e92 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/clk/ti.h>
 #include <linux/list.h>
 
+#include "clock.h"
+
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
 
@@ -116,8 +118,46 @@ static inline struct clk_hw *_get_hw(struct clk_hw_omap_comp *clk, int idx)
 
 #define to_clk_hw_comp(_hw) container_of(_hw, struct clk_hw_omap_comp, hw)
 
-static void __init ti_clk_register_composite(struct clk_hw *hw,
-                                            struct device_node *node)
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+struct clk *ti_clk_register_composite(struct ti_clk *setup)
+{
+       struct ti_clk_composite *comp;
+       struct clk_hw *gate;
+       struct clk_hw *mux;
+       struct clk_hw *div;
+       int num_parents = 1;
+       const char **parent_names = NULL;
+       struct clk *clk;
+
+       comp = setup->data;
+
+       div = ti_clk_build_component_div(comp->divider);
+       gate = ti_clk_build_component_gate(comp->gate);
+       mux = ti_clk_build_component_mux(comp->mux);
+
+       if (div)
+               parent_names = &comp->divider->parent;
+
+       if (gate)
+               parent_names = &comp->gate->parent;
+
+       if (mux) {
+               num_parents = comp->mux->num_parents;
+               parent_names = comp->mux->parents;
+       }
+
+       clk = clk_register_composite(NULL, setup->name,
+                                    parent_names, num_parents, mux,
+                                    &ti_clk_mux_ops, div,
+                                    &ti_composite_divider_ops, gate,
+                                    &ti_composite_gate_ops, 0);
+
+       return clk;
+}
+#endif
+
+static void __init _register_composite(struct clk_hw *hw,
+                                      struct device_node *node)
 {
        struct clk *clk;
        struct clk_hw_omap_comp *cclk = to_clk_hw_comp(hw);
@@ -136,7 +176,7 @@ static void __init ti_clk_register_composite(struct clk_hw *hw,
                        pr_debug("component %s not ready for %s, retry\n",
                                 cclk->comp_nodes[i]->name, node->name);
                        if (!ti_clk_retry_init(node, hw,
-                                              ti_clk_register_composite))
+                                              _register_composite))
                                return;
 
                        goto cleanup;
@@ -216,7 +256,7 @@ static void __init of_ti_composite_clk_setup(struct device_node *node)
        for (i = 0; i < num_clks; i++)
                cclk->comp_nodes[i] = _get_component_node(node, i);
 
-       ti_clk_register_composite(&cclk->hw, node);
+       _register_composite(&cclk->hw, node);
 }
 CLK_OF_DECLARE(ti_composite_clock, "ti,composite-clock",
               of_ti_composite_clk_setup);
index bff2b5b8ff598b2e150496eb3ebc984c651a0ebd..6211893c0980665749aa2c08577f9f3c17da4a78 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -300,6 +301,134 @@ static struct clk *_register_divider(struct device *dev, const char *name,
        return clk;
 }
 
+static struct clk_div_table *
+_get_div_table_from_setup(struct ti_clk_divider *setup, u8 *width)
+{
+       int valid_div = 0;
+       struct clk_div_table *table;
+       int i;
+       int div;
+       u32 val;
+       u8 flags;
+
+       if (!setup->num_dividers) {
+               /* Clk divider table not provided, determine min/max divs */
+               flags = setup->flags;
+
+               if (flags & CLKF_INDEX_STARTS_AT_ONE)
+                       val = 1;
+               else
+                       val = 0;
+
+               div = 1;
+
+               while (div < setup->max_div) {
+                       if (flags & CLKF_INDEX_POWER_OF_TWO)
+                               div <<= 1;
+                       else
+                               div++;
+                       val++;
+               }
+
+               *width = fls(val);
+
+               return NULL;
+       }
+
+       for (i = 0; i < setup->num_dividers; i++)
+               if (setup->dividers[i])
+                       valid_div++;
+
+       table = kzalloc(sizeof(*table) * (valid_div + 1), GFP_KERNEL);
+       if (!table)
+               return ERR_PTR(-ENOMEM);
+
+       valid_div = 0;
+       *width = 0;
+
+       for (i = 0; i < setup->num_dividers; i++)
+               if (setup->dividers[i]) {
+                       table[valid_div].div = setup->dividers[i];
+                       table[valid_div].val = i;
+                       valid_div++;
+                       *width = i;
+               }
+
+       *width = fls(*width);
+
+       return table;
+}
+
+struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
+{
+       struct clk_divider *div;
+       struct clk_omap_reg *reg;
+
+       if (!setup)
+               return NULL;
+
+       div = kzalloc(sizeof(*div), GFP_KERNEL);
+       if (!div)
+               return ERR_PTR(-ENOMEM);
+
+       reg = (struct clk_omap_reg *)&div->reg;
+       reg->index = setup->module;
+       reg->offset = setup->reg;
+
+       if (setup->flags & CLKF_INDEX_STARTS_AT_ONE)
+               div->flags |= CLK_DIVIDER_ONE_BASED;
+
+       if (setup->flags & CLKF_INDEX_POWER_OF_TWO)
+               div->flags |= CLK_DIVIDER_POWER_OF_TWO;
+
+       div->table = _get_div_table_from_setup(setup, &div->width);
+
+       div->shift = setup->bit_shift;
+
+       return &div->hw;
+}
+
+struct clk *ti_clk_register_divider(struct ti_clk *setup)
+{
+       struct ti_clk_divider *div;
+       struct clk_omap_reg *reg_setup;
+       u32 reg;
+       u8 width;
+       u32 flags = 0;
+       u8 div_flags = 0;
+       struct clk_div_table *table;
+       struct clk *clk;
+
+       div = setup->data;
+
+       reg_setup = (struct clk_omap_reg *)&reg;
+
+       reg_setup->index = div->module;
+       reg_setup->offset = div->reg;
+
+       if (div->flags & CLKF_INDEX_STARTS_AT_ONE)
+               div_flags |= CLK_DIVIDER_ONE_BASED;
+
+       if (div->flags & CLKF_INDEX_POWER_OF_TWO)
+               div_flags |= CLK_DIVIDER_POWER_OF_TWO;
+
+       if (div->flags & CLKF_SET_RATE_PARENT)
+               flags |= CLK_SET_RATE_PARENT;
+
+       table = _get_div_table_from_setup(div, &width);
+       if (IS_ERR(table))
+               return (struct clk *)table;
+
+       clk = _register_divider(NULL, setup->name, div->parent,
+                               flags, (void __iomem *)reg, div->bit_shift,
+                               width, div_flags, table, NULL);
+
+       if (IS_ERR(clk))
+               kfree(table);
+
+       return clk;
+}
+
 static struct clk_div_table *
 __init ti_clk_get_div_table(struct device_node *node)
 {
@@ -455,7 +584,8 @@ static void __init of_ti_divider_clk_setup(struct device_node *node)
                goto cleanup;
 
        clk = _register_divider(NULL, node->name, parent_name, flags, reg,
-                               shift, width, clk_divider_flags, table, NULL);
+                               shift, width, clk_divider_flags, table,
+                               NULL);
 
        if (!IS_ERR(clk)) {
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
index 85ac0dd501dea5fff98801ba47edabbf972492a5..81dc4698dc41740e77e82411b00c67ae9aa148ba 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -130,7 +131,7 @@ static const struct clk_ops dpll_x2_ck_ops = {
 };
 
 /**
- * ti_clk_register_dpll - low level registration of a DPLL clock
+ * _register_dpll - low level registration of a DPLL clock
  * @hw: hardware clock definition for the clock
  * @node: device node for the clock
  *
@@ -138,8 +139,8 @@ static const struct clk_ops dpll_x2_ck_ops = {
  * clk-bypass is missing), the clock is added to retry list and
  * the initialization is retried on later stage.
  */
-static void __init ti_clk_register_dpll(struct clk_hw *hw,
-                                       struct device_node *node)
+static void __init _register_dpll(struct clk_hw *hw,
+                                 struct device_node *node)
 {
        struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
        struct dpll_data *dd = clk_hw->dpll_data;
@@ -151,7 +152,7 @@ static void __init ti_clk_register_dpll(struct clk_hw *hw,
        if (IS_ERR(dd->clk_ref) || IS_ERR(dd->clk_bypass)) {
                pr_debug("clk-ref or clk-bypass missing for %s, retry later\n",
                         node->name);
-               if (!ti_clk_retry_init(node, hw, ti_clk_register_dpll))
+               if (!ti_clk_retry_init(node, hw, _register_dpll))
                        return;
 
                goto cleanup;
@@ -175,20 +176,118 @@ cleanup:
        kfree(clk_hw);
 }
 
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+void __iomem *_get_reg(u8 module, u16 offset)
+{
+       u32 reg;
+       struct clk_omap_reg *reg_setup;
+
+       reg_setup = (struct clk_omap_reg *)&reg;
+
+       reg_setup->index = module;
+       reg_setup->offset = offset;
+
+       return (void __iomem *)reg;
+}
+
+struct clk *ti_clk_register_dpll(struct ti_clk *setup)
+{
+       struct clk_hw_omap *clk_hw;
+       struct clk_init_data init = { NULL };
+       struct dpll_data *dd;
+       struct clk *clk;
+       struct ti_clk_dpll *dpll;
+       const struct clk_ops *ops = &omap3_dpll_ck_ops;
+       struct clk *clk_ref;
+       struct clk *clk_bypass;
+
+       dpll = setup->data;
+
+       if (dpll->num_parents < 2)
+               return ERR_PTR(-EINVAL);
+
+       clk_ref = clk_get_sys(NULL, dpll->parents[0]);
+       clk_bypass = clk_get_sys(NULL, dpll->parents[1]);
+
+       if (IS_ERR_OR_NULL(clk_ref) || IS_ERR_OR_NULL(clk_bypass))
+               return ERR_PTR(-EAGAIN);
+
+       dd = kzalloc(sizeof(*dd), GFP_KERNEL);
+       clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+       if (!dd || !clk_hw) {
+               clk = ERR_PTR(-ENOMEM);
+               goto cleanup;
+       }
+
+       clk_hw->dpll_data = dd;
+       clk_hw->ops = &clkhwops_omap3_dpll;
+       clk_hw->hw.init = &init;
+       clk_hw->flags = MEMMAP_ADDRESSING;
+
+       init.name = setup->name;
+       init.ops = ops;
+
+       init.num_parents = dpll->num_parents;
+       init.parent_names = dpll->parents;
+
+       dd->control_reg = _get_reg(dpll->module, dpll->control_reg);
+       dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg);
+       dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg);
+       dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg);
+
+       dd->modes = dpll->modes;
+       dd->div1_mask = dpll->div1_mask;
+       dd->idlest_mask = dpll->idlest_mask;
+       dd->mult_mask = dpll->mult_mask;
+       dd->autoidle_mask = dpll->autoidle_mask;
+       dd->enable_mask = dpll->enable_mask;
+       dd->sddiv_mask = dpll->sddiv_mask;
+       dd->dco_mask = dpll->dco_mask;
+       dd->max_divider = dpll->max_divider;
+       dd->min_divider = dpll->min_divider;
+       dd->max_multiplier = dpll->max_multiplier;
+       dd->auto_recal_bit = dpll->auto_recal_bit;
+       dd->recal_en_bit = dpll->recal_en_bit;
+       dd->recal_st_bit = dpll->recal_st_bit;
+
+       dd->clk_ref = clk_ref;
+       dd->clk_bypass = clk_bypass;
+
+       if (dpll->flags & CLKF_CORE)
+               ops = &omap3_dpll_core_ck_ops;
+
+       if (dpll->flags & CLKF_PER)
+               ops = &omap3_dpll_per_ck_ops;
+
+       if (dpll->flags & CLKF_J_TYPE)
+               dd->flags |= DPLL_J_TYPE;
+
+       clk = clk_register(NULL, &clk_hw->hw);
+
+       if (!IS_ERR(clk))
+               return clk;
+
+cleanup:
+       kfree(dd);
+       kfree(clk_hw);
+       return clk;
+}
+#endif
+
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
        defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
        defined(CONFIG_SOC_AM43XX)
 /**
- * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock
+ * _register_dpll_x2 - Registers a DPLLx2 clock
  * @node: device node for this clock
  * @ops: clk_ops for this clock
  * @hw_ops: clk_hw_ops for this clock
  *
  * Initializes a DPLL x 2 clock from device tree data.
  */
-static void ti_clk_register_dpll_x2(struct device_node *node,
-                                   const struct clk_ops *ops,
-                                   const struct clk_hw_omap_ops *hw_ops)
+static void _register_dpll_x2(struct device_node *node,
+                             const struct clk_ops *ops,
+                             const struct clk_hw_omap_ops *hw_ops)
 {
        struct clk *clk;
        struct clk_init_data init = { NULL };
@@ -318,7 +417,7 @@ static void __init of_ti_dpll_setup(struct device_node *node,
        if (dpll_mode)
                dd->modes = dpll_mode;
 
-       ti_clk_register_dpll(&clk_hw->hw, node);
+       _register_dpll(&clk_hw->hw, node);
        return;
 
 cleanup:
@@ -332,7 +431,7 @@ cleanup:
        defined(CONFIG_SOC_DRA7XX)
 static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
 {
-       ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
+       _register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
 }
 CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
               of_ti_omap4_dpll_x2_setup);
@@ -341,7 +440,7 @@ CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
 #if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
 static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
 {
-       ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
+       _register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
 }
 CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
               of_ti_am3_dpll_x2_setup);
diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c
new file mode 100644 (file)
index 0000000..6ef8963
--- /dev/null
@@ -0,0 +1,410 @@
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk/ti.h>
+#include <asm/div64.h>
+
+/* FAPLL Control Register PLL_CTRL */
+#define FAPLL_MAIN_LOCK                BIT(7)
+#define FAPLL_MAIN_PLLEN       BIT(3)
+#define FAPLL_MAIN_BP          BIT(2)
+#define FAPLL_MAIN_LOC_CTL     BIT(0)
+
+/* FAPLL powerdown register PWD */
+#define FAPLL_PWD_OFFSET       4
+
+#define MAX_FAPLL_OUTPUTS      7
+#define FAPLL_MAX_RETRIES      1000
+
+#define to_fapll(_hw)          container_of(_hw, struct fapll_data, hw)
+#define to_synth(_hw)          container_of(_hw, struct fapll_synth, hw)
+
+/* The bypass bit is inverted on the ddr_pll.. */
+#define fapll_is_ddr_pll(va)   (((u32)(va) & 0xffff) == 0x0440)
+
+/*
+ * The audio_pll_clk1 input is hard wired to the 27MHz bypass clock,
+ * and the audio_pll_clk1 synthesizer is hardwired to a 32.768 kHz output.
+ */
+#define is_ddr_pll_clk1(va)    (((u32)(va) & 0xffff) == 0x044c)
+#define is_audio_pll_clk1(va)  (((u32)(va) & 0xffff) == 0x04a8)
+
+/* Synthesizer divider register */
+#define SYNTH_LDMDIV1          BIT(8)
+
+/* Synthesizer frequency register */
+#define SYNTH_LDFREQ           BIT(31)
+
+struct fapll_data {
+       struct clk_hw hw;
+       void __iomem *base;
+       const char *name;
+       struct clk *clk_ref;
+       struct clk *clk_bypass;
+       struct clk_onecell_data outputs;
+       bool bypass_bit_inverted;
+};
+
+struct fapll_synth {
+       struct clk_hw hw;
+       struct fapll_data *fd;
+       int index;
+       void __iomem *freq;
+       void __iomem *div;
+       const char *name;
+       struct clk *clk_pll;
+};
+
+static bool ti_fapll_clock_is_bypass(struct fapll_data *fd)
+{
+       u32 v = readl_relaxed(fd->base);
+
+       if (fd->bypass_bit_inverted)
+               return !(v & FAPLL_MAIN_BP);
+       else
+               return !!(v & FAPLL_MAIN_BP);
+}
+
+static int ti_fapll_enable(struct clk_hw *hw)
+{
+       struct fapll_data *fd = to_fapll(hw);
+       u32 v = readl_relaxed(fd->base);
+
+       v |= FAPLL_MAIN_PLLEN;
+       writel_relaxed(v, fd->base);
+
+       return 0;
+}
+
+static void ti_fapll_disable(struct clk_hw *hw)
+{
+       struct fapll_data *fd = to_fapll(hw);
+       u32 v = readl_relaxed(fd->base);
+
+       v &= ~FAPLL_MAIN_PLLEN;
+       writel_relaxed(v, fd->base);
+}
+
+static int ti_fapll_is_enabled(struct clk_hw *hw)
+{
+       struct fapll_data *fd = to_fapll(hw);
+       u32 v = readl_relaxed(fd->base);
+
+       return v & FAPLL_MAIN_PLLEN;
+}
+
+static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw,
+                                         unsigned long parent_rate)
+{
+       struct fapll_data *fd = to_fapll(hw);
+       u32 fapll_n, fapll_p, v;
+       long long rate;
+
+       if (ti_fapll_clock_is_bypass(fd))
+               return parent_rate;
+
+       rate = parent_rate;
+
+       /* PLL pre-divider is P and multiplier is N */
+       v = readl_relaxed(fd->base);
+       fapll_p = (v >> 8) & 0xff;
+       if (fapll_p)
+               do_div(rate, fapll_p);
+       fapll_n = v >> 16;
+       if (fapll_n)
+               rate *= fapll_n;
+
+       return rate;
+}
+
+static u8 ti_fapll_get_parent(struct clk_hw *hw)
+{
+       struct fapll_data *fd = to_fapll(hw);
+
+       if (ti_fapll_clock_is_bypass(fd))
+               return 1;
+
+       return 0;
+}
+
+static struct clk_ops ti_fapll_ops = {
+       .enable = ti_fapll_enable,
+       .disable = ti_fapll_disable,
+       .is_enabled = ti_fapll_is_enabled,
+       .recalc_rate = ti_fapll_recalc_rate,
+       .get_parent = ti_fapll_get_parent,
+};
+
+static int ti_fapll_synth_enable(struct clk_hw *hw)
+{
+       struct fapll_synth *synth = to_synth(hw);
+       u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
+
+       v &= ~(1 << synth->index);
+       writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
+
+       return 0;
+}
+
+static void ti_fapll_synth_disable(struct clk_hw *hw)
+{
+       struct fapll_synth *synth = to_synth(hw);
+       u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
+
+       v |= 1 << synth->index;
+       writel_relaxed(v, synth->fd->base + FAPLL_PWD_OFFSET);
+}
+
+static int ti_fapll_synth_is_enabled(struct clk_hw *hw)
+{
+       struct fapll_synth *synth = to_synth(hw);
+       u32 v = readl_relaxed(synth->fd->base + FAPLL_PWD_OFFSET);
+
+       return !(v & (1 << synth->index));
+}
+
+/*
+ * See dm816x TRM chapter 1.10.3 Flying Adder PLL for more info
+ */
+static unsigned long ti_fapll_synth_recalc_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       struct fapll_synth *synth = to_synth(hw);
+       u32 synth_div_m;
+       long long rate;
+
+       /* The audio_pll_clk1 is hardwired to produce a 32.768 kHz clock */
+       if (!synth->div)
+               return 32768;
+
+       /*
+        * PLL in bypass sets the synths in bypass mode too. The PLL rate
+        * can also be set to 27MHz, so we can't use parent_rate to
+        * check for bypass mode.
+        */
+       if (ti_fapll_clock_is_bypass(synth->fd))
+               return parent_rate;
+
+       rate = parent_rate;
+
+       /*
+        * Synth frequency integer and fractional divider.
+        * Note that the phase output K is 8, so the result needs
+        * to be multiplied by 8.
+        */
+       if (synth->freq) {
+               u32 v, synth_int_div, synth_frac_div, synth_div_freq;
+
+               v = readl_relaxed(synth->freq);
+               synth_int_div = (v >> 24) & 0xf;
+               synth_frac_div = v & 0xffffff;
+               synth_div_freq = (synth_int_div * 10000000) + synth_frac_div;
+               rate *= 10000000;
+               do_div(rate, synth_div_freq);
+               rate *= 8;
+       }
+
+       /* Synth post-divider M */
+       synth_div_m = readl_relaxed(synth->div) & 0xff;
+       do_div(rate, synth_div_m);
+
+       return rate;
+}
+
+static struct clk_ops ti_fapll_synt_ops = {
+       .enable = ti_fapll_synth_enable,
+       .disable = ti_fapll_synth_disable,
+       .is_enabled = ti_fapll_synth_is_enabled,
+       .recalc_rate = ti_fapll_synth_recalc_rate,
+};
+
+static struct clk * __init ti_fapll_synth_setup(struct fapll_data *fd,
+                                               void __iomem *freq,
+                                               void __iomem *div,
+                                               int index,
+                                               const char *name,
+                                               const char *parent,
+                                               struct clk *pll_clk)
+{
+       struct clk_init_data *init;
+       struct fapll_synth *synth;
+
+       init = kzalloc(sizeof(*init), GFP_KERNEL);
+       if (!init)
+               return ERR_PTR(-ENOMEM);
+
+       init->ops = &ti_fapll_synt_ops;
+       init->name = name;
+       init->parent_names = &parent;
+       init->num_parents = 1;
+
+       synth = kzalloc(sizeof(*synth), GFP_KERNEL);
+       if (!synth)
+               goto free;
+
+       synth->fd = fd;
+       synth->index = index;
+       synth->freq = freq;
+       synth->div = div;
+       synth->name = name;
+       synth->hw.init = init;
+       synth->clk_pll = pll_clk;
+
+       return clk_register(NULL, &synth->hw);
+
+free:
+       kfree(synth);
+       kfree(init);
+
+       return ERR_PTR(-ENOMEM);
+}
+
+static void __init ti_fapll_setup(struct device_node *node)
+{
+       struct fapll_data *fd;
+       struct clk_init_data *init = NULL;
+       const char *parent_name[2];
+       struct clk *pll_clk;
+       int i;
+
+       fd = kzalloc(sizeof(*fd), GFP_KERNEL);
+       if (!fd)
+               return;
+
+       fd->outputs.clks = kzalloc(sizeof(struct clk *) *
+                                  (MAX_FAPLL_OUTPUTS + 1),
+                                  GFP_KERNEL);
+       if (!fd->outputs.clks)
+               goto free;
+
+       init = kzalloc(sizeof(*init), GFP_KERNEL);
+       if (!init)
+               goto free;
+
+       init->ops = &ti_fapll_ops;
+       init->name = node->name;
+
+       init->num_parents = of_clk_get_parent_count(node);
+       if (init->num_parents != 2) {
+               pr_err("%s must have two parents\n", node->name);
+               goto free;
+       }
+
+       parent_name[0] = of_clk_get_parent_name(node, 0);
+       parent_name[1] = of_clk_get_parent_name(node, 1);
+       init->parent_names = parent_name;
+
+       fd->clk_ref = of_clk_get(node, 0);
+       if (IS_ERR(fd->clk_ref)) {
+               pr_err("%s could not get clk_ref\n", node->name);
+               goto free;
+       }
+
+       fd->clk_bypass = of_clk_get(node, 1);
+       if (IS_ERR(fd->clk_bypass)) {
+               pr_err("%s could not get clk_bypass\n", node->name);
+               goto free;
+       }
+
+       fd->base = of_iomap(node, 0);
+       if (!fd->base) {
+               pr_err("%s could not get IO base\n", node->name);
+               goto free;
+       }
+
+       if (fapll_is_ddr_pll(fd->base))
+               fd->bypass_bit_inverted = true;
+
+       fd->name = node->name;
+       fd->hw.init = init;
+
+       /* Register the parent PLL */
+       pll_clk = clk_register(NULL, &fd->hw);
+       if (IS_ERR(pll_clk))
+               goto unmap;
+
+       fd->outputs.clks[0] = pll_clk;
+       fd->outputs.clk_num++;
+
+       /*
+        * Set up the child synthesizers starting at index 1 as the
+        * PLL output is at index 0. We need to check the clock-indices
+        * for numbering in case there are holes in the synth mapping,
+        * and then probe the synth register to see if it has a FREQ
+        * register available.
+        */
+       for (i = 0; i < MAX_FAPLL_OUTPUTS; i++) {
+               const char *output_name;
+               void __iomem *freq, *div;
+               struct clk *synth_clk;
+               int output_instance;
+               u32 v;
+
+               if (of_property_read_string_index(node, "clock-output-names",
+                                                 i, &output_name))
+                       continue;
+
+               if (of_property_read_u32_index(node, "clock-indices", i,
+                                              &output_instance))
+                       output_instance = i;
+
+               freq = fd->base + (output_instance * 8);
+               div = freq + 4;
+
+               /* Check for hardwired audio_pll_clk1 */
+               if (is_audio_pll_clk1(freq)) {
+                       freq = 0;
+                       div = 0;
+               } else {
+                       /* Does the synthesizer have a FREQ register? */
+                       v = readl_relaxed(freq);
+                       if (!v)
+                               freq = 0;
+               }
+               synth_clk = ti_fapll_synth_setup(fd, freq, div, output_instance,
+                                                output_name, node->name,
+                                                pll_clk);
+               if (IS_ERR(synth_clk))
+                       continue;
+
+               fd->outputs.clks[output_instance] = synth_clk;
+               fd->outputs.clk_num++;
+
+               clk_register_clkdev(synth_clk, output_name, NULL);
+       }
+
+       /* Register the child synthesizers as the FAPLL outputs */
+       of_clk_add_provider(node, of_clk_src_onecell_get, &fd->outputs);
+       /* Add clock alias for the outputs */
+
+       kfree(init);
+
+       return;
+
+unmap:
+       iounmap(fd->base);
+free:
+       if (fd->clk_bypass)
+               clk_put(fd->clk_bypass);
+       if (fd->clk_ref)
+               clk_put(fd->clk_ref);
+       kfree(fd->outputs.clks);
+       kfree(fd);
+       kfree(init);
+}
+
+CLK_OF_DECLARE(ti_fapll_clock, "ti,dm816-fapll-clock", ti_fapll_setup);
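Taken together, the recalc callbacks in the new fapll.c reduce to integer arithmetic: the main PLL runs at parent / P * N, and a synthesizer with a FREQ register runs at parent * 10^7 / (int * 10^7 + frac) * 8 / M. A stand-alone sketch of that arithmetic using the same bit fields as the code above; the sample input values are invented:

#include <stdint.h>
#include <stdio.h>

/* Main FAPLL rate: pre-divider P in bits 8..15, multiplier N in bits 16..31 */
static uint64_t fapll_main_rate(uint64_t parent_rate, uint32_t pll_ctrl)
{
	uint32_t p = (pll_ctrl >> 8) & 0xff;
	uint32_t n = pll_ctrl >> 16;
	uint64_t rate = parent_rate;

	if (p)
		rate /= p;
	if (n)
		rate *= n;
	return rate;
}

/* Synthesizer rate: integer part in bits 24..27 and fractional part in
 * bits 0..23 of the FREQ register, post-divider M in the low byte of the
 * DIV register; the phase output K of 8 multiplies the result.
 */
static uint64_t fapll_synth_rate(uint64_t parent_rate, uint32_t freq, uint32_t div)
{
	uint32_t synth_int = (freq >> 24) & 0xf;
	uint32_t synth_frac = freq & 0xffffff;
	uint32_t div_freq = synth_int * 10000000 + synth_frac;
	uint32_t m = div & 0xff;
	uint64_t rate = parent_rate * 10000000ULL;

	rate /= div_freq;
	rate *= 8;
	return rate / m;
}

int main(void)
{
	/* made-up register values just to exercise the math */
	uint64_t main_rate = fapll_main_rate(27000000, (32 << 16) | (1 << 8));

	printf("main:  %llu Hz\n", (unsigned long long)main_rate);
	printf("synth: %llu Hz\n",
	       (unsigned long long)fapll_synth_rate(main_rate,
						    (2 << 24) | 500000, 2));
	return 0;
}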
index b326d2797feb23c0ed84fe52be5b9905102f0234..d493307b73f42b0bdef4c36544e5146ce00b977e 100644 (file)
@@ -22,6 +22,8 @@
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
 
+#include "clock.h"
+
 #define to_clk_divider(_hw) container_of(_hw, struct clk_divider, hw)
 
 #undef pr_fmt
@@ -90,63 +92,164 @@ static int omap36xx_gate_clk_enable_with_hsdiv_restore(struct clk_hw *clk)
        return ret;
 }
 
-static void __init _of_ti_gate_clk_setup(struct device_node *node,
-                                        const struct clk_ops *ops,
-                                        const struct clk_hw_omap_ops *hw_ops)
+static struct clk *_register_gate(struct device *dev, const char *name,
+                                 const char *parent_name, unsigned long flags,
+                                 void __iomem *reg, u8 bit_idx,
+                                 u8 clk_gate_flags, const struct clk_ops *ops,
+                                 const struct clk_hw_omap_ops *hw_ops)
 {
-       struct clk *clk;
        struct clk_init_data init = { NULL };
        struct clk_hw_omap *clk_hw;
-       const char *clk_name = node->name;
-       const char *parent_name;
-       u32 val;
+       struct clk *clk;
 
        clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
        if (!clk_hw)
-               return;
+               return ERR_PTR(-ENOMEM);
 
        clk_hw->hw.init = &init;
 
-       init.name = clk_name;
+       init.name = name;
        init.ops = ops;
 
-       if (ops != &omap_gate_clkdm_clk_ops) {
-               clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0);
-               if (!clk_hw->enable_reg)
-                       goto cleanup;
+       clk_hw->enable_reg = reg;
+       clk_hw->enable_bit = bit_idx;
+       clk_hw->ops = hw_ops;
 
-               if (!of_property_read_u32(node, "ti,bit-shift", &val))
-                       clk_hw->enable_bit = val;
+       clk_hw->flags = MEMMAP_ADDRESSING | clk_gate_flags;
+
+       init.parent_names = &parent_name;
+       init.num_parents = 1;
+
+       init.flags = flags;
+
+       clk = clk_register(NULL, &clk_hw->hw);
+
+       if (IS_ERR(clk))
+               kfree(clk_hw);
+
+       return clk;
+}
+
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+struct clk *ti_clk_register_gate(struct ti_clk *setup)
+{
+       const struct clk_ops *ops = &omap_gate_clk_ops;
+       const struct clk_hw_omap_ops *hw_ops = NULL;
+       u32 reg;
+       struct clk_omap_reg *reg_setup;
+       u32 flags = 0;
+       u8 clk_gate_flags = 0;
+       struct ti_clk_gate *gate;
+
+       gate = setup->data;
+
+       if (gate->flags & CLKF_INTERFACE)
+               return ti_clk_register_interface(setup);
+
+       reg_setup = (struct clk_omap_reg *)&reg;
+
+       if (gate->flags & CLKF_SET_RATE_PARENT)
+               flags |= CLK_SET_RATE_PARENT;
+
+       if (gate->flags & CLKF_SET_BIT_TO_DISABLE)
+               clk_gate_flags |= INVERT_ENABLE;
+
+       if (gate->flags & CLKF_HSDIV) {
+               ops = &omap_gate_clk_hsdiv_restore_ops;
+               hw_ops = &clkhwops_wait;
        }
 
-       clk_hw->ops = hw_ops;
+       if (gate->flags & CLKF_DSS)
+               hw_ops = &clkhwops_omap3430es2_dss_usbhost_wait;
+
+       if (gate->flags & CLKF_WAIT)
+               hw_ops = &clkhwops_wait;
+
+       if (gate->flags & CLKF_CLKDM)
+               ops = &omap_gate_clkdm_clk_ops;
+
+       if (gate->flags & CLKF_AM35XX)
+               hw_ops = &clkhwops_am35xx_ipss_module_wait;
 
-       clk_hw->flags = MEMMAP_ADDRESSING;
+       reg_setup->index = gate->module;
+       reg_setup->offset = gate->reg;
+
+       return _register_gate(NULL, setup->name, gate->parent, flags,
+                             (void __iomem *)reg, gate->bit_shift,
+                             clk_gate_flags, ops, hw_ops);
+}
+
+struct clk_hw *ti_clk_build_component_gate(struct ti_clk_gate *setup)
+{
+       struct clk_hw_omap *gate;
+       struct clk_omap_reg *reg;
+       const struct clk_hw_omap_ops *ops = &clkhwops_wait;
+
+       if (!setup)
+               return NULL;
+
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate)
+               return ERR_PTR(-ENOMEM);
+
+       reg = (struct clk_omap_reg *)&gate->enable_reg;
+       reg->index = setup->module;
+       reg->offset = setup->reg;
+
+       gate->enable_bit = setup->bit_shift;
+
+       if (setup->flags & CLKF_NO_WAIT)
+               ops = NULL;
+
+       if (setup->flags & CLKF_INTERFACE)
+               ops = &clkhwops_iclk_wait;
+
+       gate->ops = ops;
+       gate->flags = MEMMAP_ADDRESSING;
+
+       return &gate->hw;
+}
+#endif
+
+static void __init _of_ti_gate_clk_setup(struct device_node *node,
+                                        const struct clk_ops *ops,
+                                        const struct clk_hw_omap_ops *hw_ops)
+{
+       struct clk *clk;
+       const char *parent_name;
+       void __iomem *reg = NULL;
+       u8 enable_bit = 0;
+       u32 val;
+       u32 flags = 0;
+       u8 clk_gate_flags = 0;
+
+       if (ops != &omap_gate_clkdm_clk_ops) {
+               reg = ti_clk_get_reg_addr(node, 0);
+               if (!reg)
+                       return;
+
+               if (!of_property_read_u32(node, "ti,bit-shift", &val))
+                       enable_bit = val;
+       }
 
        if (of_clk_get_parent_count(node) != 1) {
-               pr_err("%s must have 1 parent\n", clk_name);
-               goto cleanup;
+               pr_err("%s must have 1 parent\n", node->name);
+               return;
        }
 
        parent_name = of_clk_get_parent_name(node, 0);
-       init.parent_names = &parent_name;
-       init.num_parents = 1;
 
        if (of_property_read_bool(node, "ti,set-rate-parent"))
-               init.flags |= CLK_SET_RATE_PARENT;
+               flags |= CLK_SET_RATE_PARENT;
 
        if (of_property_read_bool(node, "ti,set-bit-to-disable"))
-               clk_hw->flags |= INVERT_ENABLE;
+               clk_gate_flags |= INVERT_ENABLE;
 
-       clk = clk_register(NULL, &clk_hw->hw);
+       clk = _register_gate(NULL, node->name, parent_name, flags, reg,
+                            enable_bit, clk_gate_flags, ops, hw_ops);
 
-       if (!IS_ERR(clk)) {
+       if (!IS_ERR(clk))
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
-               return;
-       }
-
-cleanup:
-       kfree(clk_hw);
 }
 
 static void __init
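ti_clk_register_gate() above is essentially a translation layer: the legacy CLKF_* flags in a ti_clk_gate descriptor pick which clk_ops and clk_hw_omap_ops the shared _register_gate() helper receives, with later flag checks overriding earlier ones. A small stand-alone sketch of that precedence for the clk_ops choice (the numeric flag values are placeholders; only the ordering mirrors the code above):

#include <stdio.h>

/* Placeholder bit values for the CLKF_* flags used above */
#define CLKF_HSDIV	(1 << 0)
#define CLKF_CLKDM	(1 << 1)
#define CLKF_INTERFACE	(1 << 2)

/* Echo which ops ti_clk_register_gate() would end up with for a flag word;
 * interface clocks divert to ti_clk_register_interface() before anything else.
 */
static const char *pick_gate_ops(unsigned int flags)
{
	if (flags & CLKF_INTERFACE)
		return "ti_clk_register_interface()";
	if (flags & CLKF_CLKDM)
		return "omap_gate_clkdm_clk_ops";
	if (flags & CLKF_HSDIV)
		return "omap_gate_clk_hsdiv_restore_ops";
	return "omap_gate_clk_ops";
}

int main(void)
{
	printf("%s\n", pick_gate_ops(CLKF_HSDIV));
	printf("%s\n", pick_gate_ops(CLKF_HSDIV | CLKF_CLKDM));
	return 0;
}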
index 9c3e8c4aaa40c0b8a46048bab734286941a77e6b..265d91f071c5e34554cc71c474278997917a7a6f 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -31,53 +32,102 @@ static const struct clk_ops ti_interface_clk_ops = {
        .is_enabled     = &omap2_dflt_clk_is_enabled,
 };
 
-static void __init _of_ti_interface_clk_setup(struct device_node *node,
-                                             const struct clk_hw_omap_ops *ops)
+static struct clk *_register_interface(struct device *dev, const char *name,
+                                      const char *parent_name,
+                                      void __iomem *reg, u8 bit_idx,
+                                      const struct clk_hw_omap_ops *ops)
 {
-       struct clk *clk;
        struct clk_init_data init = { NULL };
        struct clk_hw_omap *clk_hw;
-       const char *parent_name;
-       u32 val;
+       struct clk *clk;
 
        clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
        if (!clk_hw)
-               return;
+               return ERR_PTR(-ENOMEM);
 
        clk_hw->hw.init = &init;
        clk_hw->ops = ops;
        clk_hw->flags = MEMMAP_ADDRESSING;
+       clk_hw->enable_reg = reg;
+       clk_hw->enable_bit = bit_idx;
 
-       clk_hw->enable_reg = ti_clk_get_reg_addr(node, 0);
-       if (!clk_hw->enable_reg)
-               goto cleanup;
-
-       if (!of_property_read_u32(node, "ti,bit-shift", &val))
-               clk_hw->enable_bit = val;
-
-       init.name = node->name;
+       init.name = name;
        init.ops = &ti_interface_clk_ops;
        init.flags = 0;
 
-       parent_name = of_clk_get_parent_name(node, 0);
-       if (!parent_name) {
-               pr_err("%s must have a parent\n", node->name);
-               goto cleanup;
-       }
-
        init.num_parents = 1;
        init.parent_names = &parent_name;
 
        clk = clk_register(NULL, &clk_hw->hw);
 
-       if (!IS_ERR(clk)) {
-               of_clk_add_provider(node, of_clk_src_simple_get, clk);
+       if (IS_ERR(clk))
+               kfree(clk_hw);
+       else
                omap2_init_clk_hw_omap_clocks(clk);
+
+       return clk;
+}
+
+#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
+struct clk *ti_clk_register_interface(struct ti_clk *setup)
+{
+       const struct clk_hw_omap_ops *ops = &clkhwops_iclk_wait;
+       u32 reg;
+       struct clk_omap_reg *reg_setup;
+       struct ti_clk_gate *gate;
+
+       gate = setup->data;
+       reg_setup = (struct clk_omap_reg *)&reg;
+       reg_setup->index = gate->module;
+       reg_setup->offset = gate->reg;
+
+       if (gate->flags & CLKF_NO_WAIT)
+               ops = &clkhwops_iclk;
+
+       if (gate->flags & CLKF_HSOTGUSB)
+               ops = &clkhwops_omap3430es2_iclk_hsotgusb_wait;
+
+       if (gate->flags & CLKF_DSS)
+               ops = &clkhwops_omap3430es2_iclk_dss_usbhost_wait;
+
+       if (gate->flags & CLKF_SSI)
+               ops = &clkhwops_omap3430es2_iclk_ssi_wait;
+
+       if (gate->flags & CLKF_AM35XX)
+               ops = &clkhwops_am35xx_ipss_wait;
+
+       return _register_interface(NULL, setup->name, gate->parent,
+                                  (void __iomem *)reg, gate->bit_shift, ops);
+}
+#endif
+
+static void __init _of_ti_interface_clk_setup(struct device_node *node,
+                                             const struct clk_hw_omap_ops *ops)
+{
+       struct clk *clk;
+       const char *parent_name;
+       void __iomem *reg;
+       u8 enable_bit = 0;
+       u32 val;
+
+       reg = ti_clk_get_reg_addr(node, 0);
+       if (!reg)
+               return;
+
+       if (!of_property_read_u32(node, "ti,bit-shift", &val))
+               enable_bit = val;
+
+       parent_name = of_clk_get_parent_name(node, 0);
+       if (!parent_name) {
+               pr_err("%s must have a parent\n", node->name);
                return;
        }
 
-cleanup:
-       kfree(clk_hw);
+       clk = _register_interface(NULL, node->name, parent_name, reg,
+                                 enable_bit, ops);
+
+       if (!IS_ERR(clk))
+               of_clk_add_provider(node, of_clk_src_simple_get, clk);
 }
 
 static void __init of_ti_interface_clk_setup(struct device_node *node)
index e9d650e51287d50fd53704636241445667bdcf7c..728e253606bce51a9435e6915ee1a445dcfff606 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/clk/ti.h>
+#include "clock.h"
 
 #undef pr_fmt
 #define pr_fmt(fmt) "%s: " fmt, __func__
@@ -144,6 +145,39 @@ static struct clk *_register_mux(struct device *dev, const char *name,
        return clk;
 }
 
+struct clk *ti_clk_register_mux(struct ti_clk *setup)
+{
+       struct ti_clk_mux *mux;
+       u32 flags;
+       u8 mux_flags = 0;
+       struct clk_omap_reg *reg_setup;
+       u32 reg;
+       u32 mask;
+
+       reg_setup = (struct clk_omap_reg *)&reg;
+
+       mux = setup->data;
+       flags = CLK_SET_RATE_NO_REPARENT;
+
+       mask = mux->num_parents;
+       if (!(mux->flags & CLKF_INDEX_STARTS_AT_ONE))
+               mask--;
+
+       mask = (1 << fls(mask)) - 1;
+       reg_setup->index = mux->module;
+       reg_setup->offset = mux->reg;
+
+       if (mux->flags & CLKF_INDEX_STARTS_AT_ONE)
+               mux_flags |= CLK_MUX_INDEX_ONE;
+
+       if (mux->flags & CLKF_SET_RATE_PARENT)
+               flags |= CLK_SET_RATE_PARENT;
+
+       return _register_mux(NULL, setup->name, mux->parents, mux->num_parents,
+                            flags, (void __iomem *)reg, mux->bit_shift, mask,
+                            mux_flags, NULL, NULL);
+}
+
 /**
  * of_mux_clk_setup - Setup function for simple mux rate clock
  * @node: DT node for the clock
@@ -194,8 +228,9 @@ static void of_mux_clk_setup(struct device_node *node)
 
        mask = (1 << fls(mask)) - 1;
 
-       clk = _register_mux(NULL, node->name, parent_names, num_parents, flags,
-                           reg, shift, mask, clk_mux_flags, NULL, NULL);
+       clk = _register_mux(NULL, node->name, parent_names, num_parents,
+                           flags, reg, shift, mask, clk_mux_flags, NULL,
+                           NULL);
 
        if (!IS_ERR(clk))
                of_clk_add_provider(node, of_clk_src_simple_get, clk);
@@ -205,6 +240,37 @@ cleanup:
 }
 CLK_OF_DECLARE(mux_clk, "ti,mux-clock", of_mux_clk_setup);
 
+struct clk_hw *ti_clk_build_component_mux(struct ti_clk_mux *setup)
+{
+       struct clk_mux *mux;
+       struct clk_omap_reg *reg;
+       int num_parents;
+
+       if (!setup)
+               return NULL;
+
+       mux = kzalloc(sizeof(*mux), GFP_KERNEL);
+       if (!mux)
+               return ERR_PTR(-ENOMEM);
+
+       reg = (struct clk_omap_reg *)&mux->reg;
+
+       mux->shift = setup->bit_shift;
+
+       reg->index = setup->module;
+       reg->offset = setup->reg;
+
+       if (setup->flags & CLKF_INDEX_STARTS_AT_ONE)
+               mux->flags |= CLK_MUX_INDEX_ONE;
+
+       num_parents = setup->num_parents;
+
+       mux->mask = num_parents - 1;
+       mux->mask = (1 << fls(mux->mask)) - 1;
+
+       return &mux->hw;
+}
+
 static void __init of_ti_composite_mux_clk_setup(struct device_node *node)
 {
        struct clk_mux *mux;
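Both ti_clk_register_mux() and ti_clk_build_component_mux() above derive the register mask from the parent count with (1 << fls(mask)) - 1, i.e. they round the highest parent index up to a full power-of-two bit mask. A small stand-alone sketch of that computation, with a portable stand-in for the kernel's fls():

#include <stdio.h>

/* Portable stand-in for the kernel's fls(): position of the most
 * significant set bit, counting from 1; 0 when no bit is set.
 */
static int fls_compat(unsigned int x)
{
	int pos = 0;

	while (x) {
		x >>= 1;
		pos++;
	}
	return pos;
}

/* Width of the mux field needed for num_parents inputs (0-based indices) */
static unsigned int mux_mask(unsigned int num_parents)
{
	unsigned int mask = num_parents - 1;

	return (1u << fls_compat(mask)) - 1;
}

int main(void)
{
	/* 5 parents -> indices 0..4 -> 3 bits -> mask 0x7 */
	printf("mask for 5 parents: 0x%x\n", mux_mask(5));
	/* 8 parents -> indices 0..7 -> also mask 0x7 */
	printf("mask for 8 parents: 0x%x\n", mux_mask(8));
	return 0;
}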
index bd4769a8448582284b2734b0a77cfa6484306e17..0e950769ed033185cf2ad95587958de586ad1246 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clk-private.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/err.h>
index e2d63bc47436d1ee51014a24dc21ebcc1131ca51..bf63c96acb1a2947ce7e274f2869b51ab8e95550 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/clk-provider.h>
-#include <linux/clk-private.h>
 #include <linux/mfd/dbx500-prcmu.h>
 #include <linux/slab.h>
 #include <linux/io.h>
index 9037bebd69f79cd9121d101913bb34d0cffa4420..f870aad57711f6a69d248dc56f0309251ed0da74 100644 (file)
@@ -303,6 +303,7 @@ static void __init zynq_clk_setup(struct device_node *np)
        clks[cpu_2x] = clk_register_gate(NULL, clk_output_name[cpu_2x],
                        "cpu_2x_div", CLK_IGNORE_UNUSED, SLCR_ARM_CLK_CTRL,
                        26, 0, &armclk_lock);
+       clk_prepare_enable(clks[cpu_2x]);
 
        clk = clk_register_fixed_factor(NULL, "cpu_1x_div", "cpu_div", 0, 1,
                        4 + 2 * tmp);
index 6e6730f9dfd16cd068b77ed1359c16aff4bd017b..3de5f3a9a104c10e74c4cc834d7a853936fab5e4 100644 (file)
@@ -12,7 +12,7 @@ menuconfig CONNECTOR
 if CONNECTOR
 
 config PROC_EVENTS
-       boolean "Report process events to userspace"
+       bool "Report process events to userspace"
        depends on CONNECTOR=y
        default y
        ---help---
index 0f9a2c3c0e0d3eb1699a6a19ebee0532872dbccc..1b06fc4640e23c2840cda6011d7c061eedd64a49 100644 (file)
@@ -26,13 +26,21 @@ config ARM_VEXPRESS_SPC_CPUFREQ
 
 
 config ARM_EXYNOS_CPUFREQ
-       bool
+       tristate "SAMSUNG EXYNOS CPUfreq Driver"
+       depends on CPU_EXYNOS4210 || SOC_EXYNOS4212 || SOC_EXYNOS4412 || SOC_EXYNOS5250
+       depends on THERMAL
+       help
+         This adds the CPUFreq driver for Samsung EXYNOS platforms.
+         Supported SoC versions are:
+            Exynos4210, Exynos4212, Exynos4412, and Exynos5250.
+
+         If in doubt, say N.
 
 config ARM_EXYNOS4210_CPUFREQ
        bool "SAMSUNG EXYNOS4210"
        depends on CPU_EXYNOS4210
+       depends on ARM_EXYNOS_CPUFREQ
        default y
-       select ARM_EXYNOS_CPUFREQ
        help
          This adds the CPUFreq driver for Samsung EXYNOS4210
          SoC (S5PV310 or S5PC210).
@@ -42,8 +50,8 @@ config ARM_EXYNOS4210_CPUFREQ
 config ARM_EXYNOS4X12_CPUFREQ
        bool "SAMSUNG EXYNOS4x12"
        depends on SOC_EXYNOS4212 || SOC_EXYNOS4412
+       depends on ARM_EXYNOS_CPUFREQ
        default y
-       select ARM_EXYNOS_CPUFREQ
        help
          This adds the CPUFreq driver for Samsung EXYNOS4X12
          SoC (EXYNOS4212 or EXYNOS4412).
@@ -53,28 +61,14 @@ config ARM_EXYNOS4X12_CPUFREQ
 config ARM_EXYNOS5250_CPUFREQ
        bool "SAMSUNG EXYNOS5250"
        depends on SOC_EXYNOS5250
+       depends on ARM_EXYNOS_CPUFREQ
        default y
-       select ARM_EXYNOS_CPUFREQ
        help
          This adds the CPUFreq driver for Samsung EXYNOS5250
          SoC.
 
          If in doubt, say N.
 
-config ARM_EXYNOS5440_CPUFREQ
-       bool "SAMSUNG EXYNOS5440"
-       depends on SOC_EXYNOS5440
-       depends on HAVE_CLK && OF
-       select PM_OPP
-       default y
-       help
-         This adds the CPUFreq driver for Samsung EXYNOS5440
-         SoC. The nature of exynos5440 clock controller is
-         different than previous exynos controllers so not using
-         the common exynos framework.
-
-         If in doubt, say N.
-
 config ARM_EXYNOS_CPU_FREQ_BOOST_SW
        bool "EXYNOS Frequency Overclocking - Software"
        depends on ARM_EXYNOS_CPUFREQ && THERMAL
@@ -90,6 +84,20 @@ config ARM_EXYNOS_CPU_FREQ_BOOST_SW
 
          If in doubt, say N.
 
+config ARM_EXYNOS5440_CPUFREQ
+       tristate "SAMSUNG EXYNOS5440"
+       depends on SOC_EXYNOS5440
+       depends on HAVE_CLK && OF
+       select PM_OPP
+       default y
+       help
+         This adds the CPUFreq driver for Samsung EXYNOS5440
+         SoC. The nature of exynos5440 clock controller is
+         different than previous exynos controllers so not using
+         the common exynos framework.
+
+         If in doubt, say N.
+
 config ARM_HIGHBANK_CPUFREQ
        tristate "Calxeda Highbank-based"
        depends on ARCH_HIGHBANK && CPUFREQ_DT && REGULATOR
index 72564b701b4a7018643c77de03f58a9092ff5f71..7ea24413cee6855c65daa7954bc4843622402e75 100644 (file)
@@ -26,7 +26,7 @@ config CPU_FREQ_MAPLE
 config PPC_CORENET_CPUFREQ
        tristate "CPU frequency scaling driver for Freescale E500MC SoCs"
        depends on PPC_E500MC && OF && COMMON_CLK
-       select CLK_PPC_CORENET
+       select CLK_QORIQ
        help
          This adds the CPUFreq driver support for Freescale e500mc,
          e5500 and e6500 series SoCs which are capable of changing
index 8b4220ac888b180ba6ef4b1cda06590c5e3ab59f..82a1821471fd870a9eba9323d63c4b925c06a97c 100644 (file)
@@ -52,10 +52,11 @@ obj-$(CONFIG_ARM_DT_BL_CPUFREQ)             += arm_big_little_dt.o
 
 obj-$(CONFIG_ARCH_DAVINCI)             += davinci-cpufreq.o
 obj-$(CONFIG_UX500_SOC_DB8500)         += dbx500-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)       += exynos-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)   += exynos4210-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ)   += exynos4x12-cpufreq.o
-obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ)   += exynos5250-cpufreq.o
+obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)       += arm-exynos-cpufreq.o
+arm-exynos-cpufreq-y                                   := exynos-cpufreq.o
+arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)    += exynos4210-cpufreq.o
+arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ)    += exynos4x12-cpufreq.o
+arm-exynos-cpufreq-$(CONFIG_ARM_EXYNOS5250_CPUFREQ)    += exynos5250-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ)   += exynos5440-cpufreq.o
 obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ)     += highbank-cpufreq.o
 obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)                += imx6q-cpufreq.o
index f99a0b0b7c06acf61baec88cd667f3003e595b9c..5e98c6b1f284b651f0b74d3673c55f997e47f081 100644 (file)
 #include <linux/cpufreq.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
+#include <linux/cpu_cooling.h>
+#include <linux/cpu.h>
 
 #include "exynos-cpufreq.h"
 
 static struct exynos_dvfs_info *exynos_info;
+static struct thermal_cooling_device *cdev;
 static struct regulator *arm_regulator;
 static unsigned int locking_frequency;
 
@@ -156,6 +159,7 @@ static struct cpufreq_driver exynos_driver = {
 
 static int exynos_cpufreq_probe(struct platform_device *pdev)
 {
+       struct device_node *cpus, *np;
        int ret = -EINVAL;
 
        exynos_info = kzalloc(sizeof(*exynos_info), GFP_KERNEL);
@@ -198,9 +202,36 @@ static int exynos_cpufreq_probe(struct platform_device *pdev)
        /* Done here as we want to capture boot frequency */
        locking_frequency = clk_get_rate(exynos_info->cpu_clk) / 1000;
 
-       if (!cpufreq_register_driver(&exynos_driver))
+       ret = cpufreq_register_driver(&exynos_driver);
+       if (ret)
+               goto err_cpufreq_reg;
+
+       cpus = of_find_node_by_path("/cpus");
+       if (!cpus) {
+               pr_err("failed to find cpus node\n");
+               return 0;
+       }
+
+       np = of_get_next_child(cpus, NULL);
+       if (!np) {
+               pr_err("failed to find cpus child node\n");
+               of_node_put(cpus);
                return 0;
+       }
+
+       if (of_find_property(np, "#cooling-cells", NULL)) {
+               cdev = of_cpufreq_cooling_register(np,
+                                                  cpu_present_mask);
+               if (IS_ERR(cdev))
+                       pr_err("running cpufreq without cooling device: %ld\n",
+                              PTR_ERR(cdev));
+       }
+       of_node_put(np);
+       of_node_put(cpus);
+
+       return 0;
 
+err_cpufreq_reg:
        dev_err(&pdev->dev, "failed to register cpufreq driver\n");
        regulator_put(arm_regulator);
 err_vdd_arm:
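The probe changes above register the first CPU's device-tree node as a cpufreq cooling device when it declares #cooling-cells, so the thermal core can throttle the Exynos CPU. A condensed, hedged sketch of just that lookup-and-register step (assuming it runs after cpufreq_register_driver() has succeeded, as in the hunk; error handling is trimmed):

#include <linux/of.h>
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>

/* Sketch: register the boot CPU's DT node as a cpufreq cooling device.
 * May return NULL or an ERR_PTR; callers are expected to IS_ERR-check.
 */
static struct thermal_cooling_device *example_register_cpu_cooling(void)
{
	struct thermal_cooling_device *cdev = NULL;
	struct device_node *cpus, *np;

	cpus = of_find_node_by_path("/cpus");
	if (!cpus)
		return NULL;

	np = of_get_next_child(cpus, NULL);
	if (np && of_find_property(np, "#cooling-cells", NULL))
		cdev = of_cpufreq_cooling_register(np, cpu_present_mask);

	of_node_put(np);
	of_node_put(cpus);

	return cdev;
}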
index 2fd53eaaec20bf30ef00d6a19226fba13670501a..d6d425773fa497274301eaa88f247fb8dd770e89 100644 (file)
@@ -263,7 +263,7 @@ out:
 }
 
 #ifdef CONFIG_ARM_S3C2416_CPUFREQ_VCORESCALE
-static void __init s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
+static void s3c2416_cpufreq_cfg_regulator(struct s3c2416_data *s3c_freq)
 {
        int count, v, i, found;
        struct cpufreq_frequency_table *pos;
@@ -333,7 +333,7 @@ static struct notifier_block s3c2416_cpufreq_reboot_notifier = {
        .notifier_call = s3c2416_cpufreq_reboot_notifier_evt,
 };
 
-static int __init s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
+static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy)
 {
        struct s3c2416_data *s3c_freq = &s3c2416_cpufreq;
        struct cpufreq_frequency_table *pos;
index d00f1cee45094a6c01e004934de6e2928d4e9222..733aa5153e7451f645a65e3452ed44d6d488054a 100644 (file)
@@ -144,11 +144,6 @@ static void s3c_cpufreq_setfvco(struct s3c_cpufreq_config *cfg)
        (cfg->info->set_fvco)(cfg);
 }
 
-static inline void s3c_cpufreq_resume_clocks(void)
-{
-       cpu_cur.info->resume_clocks();
-}
-
 static inline void s3c_cpufreq_updateclk(struct clk *clk,
                                         unsigned int freq)
 {
@@ -417,9 +412,6 @@ static int s3c_cpufreq_resume(struct cpufreq_policy *policy)
 
        last_target = ~0;       /* invalidate last_target setting */
 
-       /* first, find out what speed we resumed at. */
-       s3c_cpufreq_resume_clocks();
-
        /* whilst we will be called later on, we try and re-set the
         * cpu frequencies as soon as possible so that we do not end
         * up resuming devices and then immediately having to re-set
@@ -454,7 +446,7 @@ static struct cpufreq_driver s3c24xx_driver = {
 };
 
 
-int __init s3c_cpufreq_register(struct s3c_cpufreq_info *info)
+int s3c_cpufreq_register(struct s3c_cpufreq_info *info)
 {
        if (!info || !info->name) {
                printk(KERN_ERR "%s: failed to pass valid information\n",
index aedec09579340b2db42095e3acebcbc4776543c8..59372077ec7c1a1b7d64e5ac67880b5e625526b5 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/notifier.h>
 #include <linux/clockchips.h>
 #include <linux/of.h>
+#include <linux/slab.h>
 
 #include <asm/machdep.h>
 #include <asm/firmware.h>
@@ -158,70 +159,83 @@ static int powernv_add_idle_states(void)
        struct device_node *power_mgt;
        int nr_idle_states = 1; /* Snooze */
        int dt_idle_states;
-       const __be32 *idle_state_flags;
-       const __be32 *idle_state_latency;
-       u32 len_flags, flags, latency_ns;
-       int i;
+       u32 *latency_ns, *residency_ns, *flags;
+       int i, rc;
 
        /* Currently we have snooze statically defined */
 
        power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
        if (!power_mgt) {
                pr_warn("opal: PowerMgmt Node not found\n");
-               return nr_idle_states;
+               goto out;
        }
 
-       idle_state_flags = of_get_property(power_mgt, "ibm,cpu-idle-state-flags", &len_flags);
-       if (!idle_state_flags) {
-               pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
-               return nr_idle_states;
+       /* Read values of any property to determine the number of idle states */
+       dt_idle_states = of_property_count_u32_elems(power_mgt, "ibm,cpu-idle-state-flags");
+       if (dt_idle_states < 0) {
+               pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+               goto out;
        }
 
-       idle_state_latency = of_get_property(power_mgt,
-                       "ibm,cpu-idle-state-latencies-ns", NULL);
-       if (!idle_state_latency) {
-               pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-latencies-ns\n");
-               return nr_idle_states;
+       flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
+       if (of_property_read_u32_array(power_mgt,
+                       "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
+               pr_warn("cpuidle-powernv : missing ibm,cpu-idle-state-flags in DT\n");
+               goto out_free_flags;
        }
 
-       dt_idle_states = len_flags / sizeof(u32);
+       latency_ns = kzalloc(sizeof(*latency_ns) * dt_idle_states, GFP_KERNEL);
+       rc = of_property_read_u32_array(power_mgt,
+               "ibm,cpu-idle-state-latencies-ns", latency_ns, dt_idle_states);
+       if (rc) {
+               pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-latencies-ns in DT\n");
+               goto out_free_latency;
+       }
 
-       for (i = 0; i < dt_idle_states; i++) {
+       residency_ns = kzalloc(sizeof(*residency_ns) * dt_idle_states, GFP_KERNEL);
+       rc = of_property_read_u32_array(power_mgt,
+               "ibm,cpu-idle-state-residency-ns", residency_ns, dt_idle_states);
 
-               flags = be32_to_cpu(idle_state_flags[i]);
+       for (i = 0; i < dt_idle_states; i++) {
 
-               /* Cpuidle accepts exit_latency in us and we estimate
-                * target residency to be 10x exit_latency
+               /*
+                * Cpuidle accepts exit_latency and target_residency in us.
+                * Use default target_residency values if f/w does not expose it.
                 */
-               latency_ns = be32_to_cpu(idle_state_latency[i]);
-               if (flags & OPAL_PM_NAP_ENABLED) {
+               if (flags[i] & OPAL_PM_NAP_ENABLED) {
                        /* Add NAP state */
                        strcpy(powernv_states[nr_idle_states].name, "Nap");
                        strcpy(powernv_states[nr_idle_states].desc, "Nap");
                        powernv_states[nr_idle_states].flags = 0;
-                       powernv_states[nr_idle_states].exit_latency =
-                                       ((unsigned int)latency_ns) / 1000;
-                       powernv_states[nr_idle_states].target_residency =
-                                       ((unsigned int)latency_ns / 100);
+                       powernv_states[nr_idle_states].target_residency = 100;
                        powernv_states[nr_idle_states].enter = &nap_loop;
-                       nr_idle_states++;
-               }
-
-               if (flags & OPAL_PM_SLEEP_ENABLED ||
-                       flags & OPAL_PM_SLEEP_ENABLED_ER1) {
+               } else if (flags[i] & OPAL_PM_SLEEP_ENABLED ||
+                       flags[i] & OPAL_PM_SLEEP_ENABLED_ER1) {
                        /* Add FASTSLEEP state */
                        strcpy(powernv_states[nr_idle_states].name, "FastSleep");
                        strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
                        powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIMER_STOP;
-                       powernv_states[nr_idle_states].exit_latency =
-                                       ((unsigned int)latency_ns) / 1000;
-                       powernv_states[nr_idle_states].target_residency =
-                                       ((unsigned int)latency_ns / 100);
+                       powernv_states[nr_idle_states].target_residency = 300000;
                        powernv_states[nr_idle_states].enter = &fastsleep_loop;
-                       nr_idle_states++;
                }
+
+               powernv_states[nr_idle_states].exit_latency =
+                               ((unsigned int)latency_ns[i]) / 1000;
+
+               if (!rc) {
+                       powernv_states[nr_idle_states].target_residency =
+                               ((unsigned int)residency_ns[i]) / 1000;
+               }
+
+               nr_idle_states++;
        }
 
+       kfree(residency_ns);
+out_free_latency:
+       kfree(latency_ns);
+out_free_flags:
+       kfree(flags);
+out:
        return nr_idle_states;
 }
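The powernv cpuidle rework above stops hand-parsing raw __be32 property data and instead sizes its buffers from of_property_count_u32_elems() and fills them with of_property_read_u32_array(), which handle endian conversion and length checking in one place. A generic sketch of that pattern (the helper name is made up; the OF calls are the ones used in the hunk):

#include <linux/of.h>
#include <linux/slab.h>

/* Sketch: pull a whole u32 array property out of a device node, sized
 * from the property itself, as done above for "ibm,cpu-idle-state-flags".
 */
static u32 *example_read_u32_array(struct device_node *np, const char *prop,
				   int *out_len)
{
	int len;
	u32 *vals;

	len = of_property_count_u32_elems(np, prop);
	if (len <= 0)
		return NULL;

	vals = kcalloc(len, sizeof(*vals), GFP_KERNEL);
	if (!vals)
		return NULL;

	if (of_property_read_u32_array(np, prop, vals, len)) {
		kfree(vals);
		return NULL;
	}

	*out_len = len;
	return vals;
}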
 
index 17638d7cf5c279a3b2fd63aa2250939a8c059388..5907c1718f8c74fbe4a7d2c3bceb21759ed07422 100644 (file)
@@ -2174,14 +2174,20 @@ static void __log_bus_error(struct mem_ctl_info *mci, struct err_info *err,
 
 static inline void decode_bus_error(int node_id, struct mce *m)
 {
-       struct mem_ctl_info *mci = mcis[node_id];
-       struct amd64_pvt *pvt = mci->pvt_info;
+       struct mem_ctl_info *mci;
+       struct amd64_pvt *pvt;
        u8 ecc_type = (m->status >> 45) & 0x3;
        u8 xec = XEC(m->status, 0x1f);
        u16 ec = EC(m->status);
        u64 sys_addr;
        struct err_info err;
 
+       mci = edac_mc_find(node_id);
+       if (!mci)
+               return;
+
+       pvt = mci->pvt_info;
+
        /* Bail out early if this was an 'observed' error */
        if (PP(ec) == NBSL_PP_OBS)
                return;
index 63aa6730e89ea511492f6e101f48107fa59c8c3e..1acf57ba4c86bdc95b2e6d4d8281295f5568f6fc 100644 (file)
@@ -2447,7 +2447,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_ibridge_table);
                type = IVY_BRIDGE;
                break;
-       case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
+       case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
                rc = sbridge_get_all_devices(&num_mc, pci_dev_descr_sbridge_table);
                type = SANDY_BRIDGE;
                break;
@@ -2460,8 +2460,11 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                type = BROADWELL;
                break;
        }
-       if (unlikely(rc < 0))
+       if (unlikely(rc < 0)) {
+               edac_dbg(0, "couldn't get all devices for 0x%x\n", pdev->device);
                goto fail0;
+       }
+
        mc = 0;
 
        list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
@@ -2474,7 +2477,7 @@ static int sbridge_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        goto fail1;
        }
 
-       sbridge_printk(KERN_INFO, "Driver loaded.\n");
+       sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
 
        mutex_unlock(&sbridge_edac_lock);
        return 0;
index af5d63c7cc53ded6b5ab4130b2b9279a5d52e988..2fe195002021d079ec36515a7ebba4385c1f3600 100644 (file)
@@ -75,29 +75,25 @@ efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
        unsigned long key;
        u32 desc_version;
 
-       *map_size = 0;
-       *desc_size = 0;
-       key = 0;
-       status = efi_call_early(get_memory_map, map_size, NULL,
-                               &key, desc_size, &desc_version);
-       if (status != EFI_BUFFER_TOO_SMALL)
-               return EFI_LOAD_ERROR;
-
+       *map_size = sizeof(*m) * 32;
+again:
        /*
         * Add an additional efi_memory_desc_t because we're doing an
         * allocation which may be in a new descriptor region.
         */
-       *map_size += *desc_size;
+       *map_size += sizeof(*m);
        status = efi_call_early(allocate_pool, EFI_LOADER_DATA,
                                *map_size, (void **)&m);
        if (status != EFI_SUCCESS)
                goto fail;
 
+       *desc_size = 0;
+       key = 0;
        status = efi_call_early(get_memory_map, map_size, m,
                                &key, desc_size, &desc_version);
        if (status == EFI_BUFFER_TOO_SMALL) {
                efi_call_early(free_pool, m);
-               return EFI_LOAD_ERROR;
+               goto again;
        }
 
        if (status != EFI_SUCCESS)
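The efi_get_memory_map() change above replaces a single size probe with a grow-and-retry loop: allocate a guess, and whenever the firmware reports EFI_BUFFER_TOO_SMALL with an updated size, free the buffer and try again. The same shape in plain C, with a hypothetical query() standing in for the GetMemoryMap call (all names and sizes here are illustrative):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical query: fills buf if *size is big enough, otherwise updates
 * *size to the required length and reports -ENOSPC, the way GetMemoryMap()
 * reports EFI_BUFFER_TOO_SMALL.
 */
static int query(void *buf, size_t *size)
{
	static const char data[] = "sixty-four bytes of pretend memory map";
	size_t need = 64;

	if (*size < need) {
		*size = need;
		return -ENOSPC;
	}
	memcpy(buf, data, sizeof(data));
	return 0;
}

static void *fetch_with_retry(size_t *size)
{
	void *buf;

	*size = 32;			/* initial guess */
	for (;;) {
		/* leave slack for anything the allocation itself adds */
		*size += 16;
		buf = malloc(*size);
		if (!buf)
			return NULL;

		if (query(buf, size) != -ENOSPC)
			return buf;	/* success (or a hard error) */

		free(buf);		/* too small: retry with updated size */
	}
}

int main(void)
{
	size_t size;
	void *map = fetch_with_retry(&size);

	free(map);
	return 0;
}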
index db4fb6e1cc5b3ca83d14d3f1e0e52e66fe75979a..7c669c328c4c7b4945dd22cde0f631aa51cb6b4b 100644 (file)
@@ -1872,6 +1872,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
@@ -1926,6 +1927,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
 #endif
 #if IS_ENABLED(CONFIG_HID_SAITEK)
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
index 46edb4d3ed28efc61582811ea6b5fdeac2a2808a..204312bfab2c6319985ce8d82d0a92900c746c65 100644 (file)
 #define USB_DEVICE_ID_MS_LK6K          0x00f9
 #define USB_DEVICE_ID_MS_PRESENTER_8K_BT       0x0701
 #define USB_DEVICE_ID_MS_PRESENTER_8K_USB      0x0713
+#define USB_DEVICE_ID_MS_NE7K          0x071d
 #define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K      0x0730
 #define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500    0x076c
 #define USB_DEVICE_ID_MS_SURFACE_PRO_2   0x0799
 #define USB_VENDOR_ID_SAITEK           0x06a3
 #define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
 #define USB_DEVICE_ID_SAITEK_PS1000    0x0621
+#define USB_DEVICE_ID_SAITEK_RAT7_OLD  0x0ccb
 #define USB_DEVICE_ID_SAITEK_RAT7      0x0cd7
 #define USB_DEVICE_ID_SAITEK_MMO7      0x0cd0
 
index fbaea6eb882e21afb6cba576279834a099780434..af935eb198c93549867c4e50bb48a997e5cd3f2b 100644 (file)
@@ -264,6 +264,8 @@ static const struct hid_device_id ms_devices[] = {
                .driver_data = MS_ERGONOMY },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE4K_JP),
                .driver_data = MS_ERGONOMY },
+       { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_NE7K),
+               .driver_data = MS_ERGONOMY },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_LK6K),
                .driver_data = MS_ERGONOMY | MS_RDESC },
        { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB),
index 5632c54eadf0206faee31afefc99235763c37e14..a014f21275d8bfada33701b4bef5013f3b81eb2a 100644 (file)
@@ -177,6 +177,8 @@ static int saitek_event(struct hid_device *hdev, struct hid_field *field,
 static const struct hid_device_id saitek_devices[] = {
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000),
                .driver_data = SAITEK_FIX_PS1000 },
+       { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_OLD),
+               .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
                .driver_data = SAITEK_RELEASE_MODE_RAT7 },
        { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
index 6a58b6c723aa215408051e2b3c967205569a7b94..e54ce1097e2cc57f5049cf852016087a32534c47 100644 (file)
@@ -135,8 +135,9 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
 {
        struct hid_sensor_hub_callbacks_list *callback;
        struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
+       unsigned long flags;
 
-       spin_lock(&pdata->dyn_callback_lock);
+       spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
        list_for_each_entry(callback, &pdata->dyn_callback_list, list)
                if (callback->usage_id == usage_id &&
                        (collection_index >=
@@ -145,10 +146,11 @@ static struct hid_sensor_hub_callbacks *sensor_hub_get_callback(
                                callback->hsdev->end_collection_index)) {
                        *priv = callback->priv;
                        *hsdev = callback->hsdev;
-                       spin_unlock(&pdata->dyn_callback_lock);
+                       spin_unlock_irqrestore(&pdata->dyn_callback_lock,
+                                              flags);
                        return callback->usage_callback;
                }
-       spin_unlock(&pdata->dyn_callback_lock);
+       spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
 
        return NULL;
 }
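The sensor-hub change above moves the callback-list lock to spin_lock_irqsave()/spin_unlock_irqrestore(), presumably because the list can also be reached from interrupt context, so the holder must keep local interrupts off and then restore whatever interrupt state the caller already had. A minimal kernel-style sketch of that pattern (the lock, list, and field names are placeholders):

#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(example_lock);
static LIST_HEAD(example_list);

struct example_entry {
	struct list_head list;
	int usage_id;
};

/* Sketch: walk a list that may also be touched from IRQ context, so hold
 * the lock with local interrupts disabled and restore the caller's state.
 */
static struct example_entry *example_find(int usage_id)
{
	struct example_entry *e, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	list_for_each_entry(e, &example_list, list) {
		if (e->usage_id == usage_id) {
			found = e;
			break;
		}
	}
	spin_unlock_irqrestore(&example_lock, flags);

	return found;
}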
index 31e9d25611064d0500eba47ea0b710beac31bb0b..1896c019e302934aa13c6f9ac434c51637f450ce 100644 (file)
@@ -804,7 +804,7 @@ union sixaxis_output_report_01 {
 #define DS4_REPORT_0x81_SIZE 7
 #define SIXAXIS_REPORT_0xF2_SIZE 18
 
-static spinlock_t sony_dev_list_lock;
+static DEFINE_SPINLOCK(sony_dev_list_lock);
 static LIST_HEAD(sony_device_list);
 static DEFINE_IDA(sony_device_id_allocator);
 
@@ -1944,6 +1944,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
                return -ENOMEM;
        }
 
+       spin_lock_init(&sc->lock);
+
        sc->quirks = quirks;
        hid_set_drvdata(hdev, sc);
        sc->hdev = hdev;
@@ -2147,8 +2149,8 @@ static void __exit sony_exit(void)
 {
        dbg_hid("Sony:%s\n", __func__);
 
-       ida_destroy(&sony_device_id_allocator);
        hid_unregister_driver(&sony_driver);
+       ida_destroy(&sony_device_id_allocator);
 }
 module_init(sony_init);
 module_exit(sony_exit);
index d43e967e75339ec7972e734e284c4356e31a4e38..36053f33d6d93e97009b0d6ba3f4aa5416be8fea 100644 (file)
@@ -370,7 +370,10 @@ static int i2c_hid_hwreset(struct i2c_client *client)
 static void i2c_hid_get_input(struct i2c_hid *ihid)
 {
        int ret, ret_size;
-       int size = ihid->bufsize;
+       int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+
+       if (size > ihid->bufsize)
+               size = ihid->bufsize;
 
        ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
        if (ret != size) {
@@ -785,7 +788,7 @@ static int i2c_hid_init_irq(struct i2c_client *client)
        dev_dbg(&client->dev, "Requesting IRQ: %d\n", client->irq);
 
        ret = request_threaded_irq(client->irq, NULL, i2c_hid_irq,
-                       IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+                       IRQF_TRIGGER_LOW | IRQF_ONESHOT,
                        client->name, ihid);
        if (ret < 0) {
                dev_warn(&client->dev,
index 1a6507999a6534f0b851e209bb702696d1f50e58..046351cf17f3432814b46e42f0892920c76f1b32 100644 (file)
@@ -778,6 +778,11 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
                        input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[4]));
                        input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[6]));
                        input_report_abs(input, ABS_Z, be16_to_cpup((__be16 *)&data[8]));
+                       if ((data[2] & 0x07) | data[4] | data[5] | data[6] | data[7] | data[8] | data[9]) {
+                               input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
+                       } else {
+                               input_report_abs(input, ABS_MISC, 0);
+                       }
                } else if (features->type == CINTIQ_HYBRID) {
                        /*
                         * Do not send hardware buttons under Android. They
@@ -2725,9 +2730,9 @@ static const struct wacom_features wacom_features_0xF6 =
          .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf8, .touch_max = 10,
          .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0x32A =
-       { "Wacom Cintiq 27QHD", 119740, 67520, 2047,
-         63, WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
-         WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
+       { "Wacom Cintiq 27QHD", 119740, 67520, 2047, 63,
+         WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+         WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0x32B =
        { "Wacom Cintiq 27QHD touch", 119740, 67520, 2047, 63,
          WACOM_27QHD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
index d931cbbed24069a072385725f5c1fd454e04acdb..110fade9cb74680f0f37115353fe166f7edeca3d 100644 (file)
@@ -1606,7 +1606,7 @@ config SENSORS_W83795
          will be called w83795.
 
 config SENSORS_W83795_FANCTRL
-       boolean "Include automatic fan control support (DANGEROUS)"
+       bool "Include automatic fan control support (DANGEROUS)"
        depends on SENSORS_W83795
        default n
        help
index a674cd83a4e2ecfb4e3fbf63f31539ca933ea02b..9f7dbd189c97420acfcbdef68e79137bb6b32015 100644 (file)
@@ -57,7 +57,7 @@ config SENSORS_LTC2978
          be called ltc2978.
 
 config SENSORS_LTC2978_REGULATOR
-       boolean "Regulator support for LTC2978 and compatibles"
+       bool "Regulator support for LTC2978 and compatibles"
        depends on SENSORS_LTC2978 && REGULATOR
        help
          If you say yes here you get regulator support for Linear
index 8c9e619f3026c9e4e88c979698cae39fd41e0c1a..78fbee46362828fceb0e098f0088ba8b8828e1f6 100644 (file)
@@ -35,11 +35,11 @@ config ACPI_I2C_OPREGION
 if I2C
 
 config I2C_BOARDINFO
-       boolean
+       bool
        default y
 
 config I2C_COMPAT
-       boolean "Enable compatibility bits for old user-space"
+       bool "Enable compatibility bits for old user-space"
        default y
        help
          Say Y here if you intend to run lm-sensors 3.1.1 or older, or any
index ab838d9e28b6389dc6d97dc633ea6259d2126ca3..22da9c2ffa2250cad9a7172bdbc94890d659a744 100644 (file)
@@ -79,7 +79,7 @@ config I2C_AMD8111
 
 config I2C_HIX5HD2
        tristate "Hix5hd2 high-speed I2C driver"
-       depends on ARCH_HIX5HD2
+       depends on ARCH_HIX5HD2 || COMPILE_TEST
        help
          Say Y here to include support for high-speed I2C controller in the
          Hisilicon based hix5hd2 SoCs.
@@ -372,6 +372,16 @@ config I2C_BCM2835
          This support is also available as a module.  If so, the module
          will be called i2c-bcm2835.
 
+config I2C_BCM_IPROC
+       tristate "Broadcom iProc I2C controller"
+       depends on ARCH_BCM_IPROC || COMPILE_TEST
+       default ARCH_BCM_IPROC
+       help
+         If you say yes to this option, support will be included for the
+         Broadcom iProc I2C controller.
+
+         If you don't know what to do here, say N.
+
 config I2C_BCM_KONA
        tristate "BCM Kona I2C adapter"
        depends on ARCH_BCM_MOBILE
@@ -465,6 +475,16 @@ config I2C_DESIGNWARE_PCI
          This driver can also be built as a module.  If so, the module
          will be called i2c-designware-pci.
 
+config I2C_DESIGNWARE_BAYTRAIL
+       bool "Intel Baytrail I2C semaphore support"
+       depends on I2C_DESIGNWARE_PLATFORM && IOSF_MBI=y && ACPI
+       help
+         This driver enables managed host access to the PMIC I2C bus on select
+         Intel BayTrail platforms using the X-Powers AXP288 PMIC. It allows
+         the host to request uninterrupted access to the PMIC's I2C bus from
+         the platform firmware controlling it. You should say Y if running on
+         a BayTrail system using the AXP288.
+
 config I2C_EFM32
        tristate "EFM32 I2C controller"
        depends on ARCH_EFM32 || COMPILE_TEST
index 56388f658d2f2567cbcf4b38433212c94d0d0faf..3638feb6677e1d6d7991b6d0d831ebc1b99c2e11 100644 (file)
@@ -33,6 +33,7 @@ obj-$(CONFIG_I2C_AT91)                += i2c-at91.o
 obj-$(CONFIG_I2C_AU1550)       += i2c-au1550.o
 obj-$(CONFIG_I2C_AXXIA)                += i2c-axxia.o
 obj-$(CONFIG_I2C_BCM2835)      += i2c-bcm2835.o
+obj-$(CONFIG_I2C_BCM_IPROC)    += i2c-bcm-iproc.o
 obj-$(CONFIG_I2C_BLACKFIN_TWI) += i2c-bfin-twi.o
 obj-$(CONFIG_I2C_CADENCE)      += i2c-cadence.o
 obj-$(CONFIG_I2C_CBUS_GPIO)    += i2c-cbus-gpio.o
@@ -41,6 +42,7 @@ obj-$(CONFIG_I2C_DAVINCI)     += i2c-davinci.o
 obj-$(CONFIG_I2C_DESIGNWARE_CORE)      += i2c-designware-core.o
 obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM)  += i2c-designware-platform.o
 i2c-designware-platform-objs := i2c-designware-platdrv.o
+i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o
 obj-$(CONFIG_I2C_DESIGNWARE_PCI)       += i2c-designware-pci.o
 i2c-designware-pci-objs := i2c-designware-pcidrv.o
 obj-$(CONFIG_I2C_EFM32)                += i2c-efm32.o
diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c
new file mode 100644 (file)
index 0000000..d3c8915
--- /dev/null
@@ -0,0 +1,461 @@
+/*
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#define CFG_OFFSET                   0x00
+#define CFG_RESET_SHIFT              31
+#define CFG_EN_SHIFT                 30
+#define CFG_M_RETRY_CNT_SHIFT        16
+#define CFG_M_RETRY_CNT_MASK         0x0f
+
+#define TIM_CFG_OFFSET               0x04
+#define TIM_CFG_MODE_400_SHIFT       31
+
+#define M_FIFO_CTRL_OFFSET           0x0c
+#define M_FIFO_RX_FLUSH_SHIFT        31
+#define M_FIFO_TX_FLUSH_SHIFT        30
+#define M_FIFO_RX_CNT_SHIFT          16
+#define M_FIFO_RX_CNT_MASK           0x7f
+#define M_FIFO_RX_THLD_SHIFT         8
+#define M_FIFO_RX_THLD_MASK          0x3f
+
+#define M_CMD_OFFSET                 0x30
+#define M_CMD_START_BUSY_SHIFT       31
+#define M_CMD_STATUS_SHIFT           25
+#define M_CMD_STATUS_MASK            0x07
+#define M_CMD_STATUS_SUCCESS         0x0
+#define M_CMD_STATUS_LOST_ARB        0x1
+#define M_CMD_STATUS_NACK_ADDR       0x2
+#define M_CMD_STATUS_NACK_DATA       0x3
+#define M_CMD_STATUS_TIMEOUT         0x4
+#define M_CMD_PROTOCOL_SHIFT         9
+#define M_CMD_PROTOCOL_MASK          0xf
+#define M_CMD_PROTOCOL_BLK_WR        0x7
+#define M_CMD_PROTOCOL_BLK_RD        0x8
+#define M_CMD_PEC_SHIFT              8
+#define M_CMD_RD_CNT_SHIFT           0
+#define M_CMD_RD_CNT_MASK            0xff
+
+#define IE_OFFSET                    0x38
+#define IE_M_RX_FIFO_FULL_SHIFT      31
+#define IE_M_RX_THLD_SHIFT           30
+#define IE_M_START_BUSY_SHIFT        28
+
+#define IS_OFFSET                    0x3c
+#define IS_M_RX_FIFO_FULL_SHIFT      31
+#define IS_M_RX_THLD_SHIFT           30
+#define IS_M_START_BUSY_SHIFT        28
+
+#define M_TX_OFFSET                  0x40
+#define M_TX_WR_STATUS_SHIFT         31
+#define M_TX_DATA_SHIFT              0
+#define M_TX_DATA_MASK               0xff
+
+#define M_RX_OFFSET                  0x44
+#define M_RX_STATUS_SHIFT            30
+#define M_RX_STATUS_MASK             0x03
+#define M_RX_PEC_ERR_SHIFT           29
+#define M_RX_DATA_SHIFT              0
+#define M_RX_DATA_MASK               0xff
+
+#define I2C_TIMEOUT_MESC             100
+#define M_TX_RX_FIFO_SIZE            64
+
+enum bus_speed_index {
+       I2C_SPD_100K = 0,
+       I2C_SPD_400K,
+};
+
+struct bcm_iproc_i2c_dev {
+       struct device *device;
+       int irq;
+
+       void __iomem *base;
+
+       struct i2c_adapter adapter;
+
+       struct completion done;
+       int xfer_is_done;
+};
+
+/*
+ * Can be expanded in the future if more interrupt status bits are utilized
+ */
+#define ISR_MASK (1 << IS_M_START_BUSY_SHIFT)
+
+static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data)
+{
+       struct bcm_iproc_i2c_dev *iproc_i2c = data;
+       u32 status = readl(iproc_i2c->base + IS_OFFSET);
+
+       status &= ISR_MASK;
+
+       if (!status)
+               return IRQ_NONE;
+
+       writel(status, iproc_i2c->base + IS_OFFSET);
+       iproc_i2c->xfer_is_done = 1;
+       complete_all(&iproc_i2c->done);
+
+       return IRQ_HANDLED;
+}
+
+static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c,
+                                     struct i2c_msg *msg)
+{
+       u32 val;
+
+       val = readl(iproc_i2c->base + M_CMD_OFFSET);
+       val = (val >> M_CMD_STATUS_SHIFT) & M_CMD_STATUS_MASK;
+
+       switch (val) {
+       case M_CMD_STATUS_SUCCESS:
+               return 0;
+
+       case M_CMD_STATUS_LOST_ARB:
+               dev_dbg(iproc_i2c->device, "lost bus arbitration\n");
+               return -EAGAIN;
+
+       case M_CMD_STATUS_NACK_ADDR:
+               dev_dbg(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr);
+               return -ENXIO;
+
+       case M_CMD_STATUS_NACK_DATA:
+               dev_dbg(iproc_i2c->device, "NAK data\n");
+               return -ENXIO;
+
+       case M_CMD_STATUS_TIMEOUT:
+               dev_dbg(iproc_i2c->device, "bus timeout\n");
+               return -ETIMEDOUT;
+
+       default:
+               dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val);
+               return -EIO;
+       }
+}
+
+static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c,
+                                        struct i2c_msg *msg)
+{
+       int ret, i;
+       u8 addr;
+       u32 val;
+       unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MESC);
+
+       /* need to reserve one byte in the FIFO for the slave address */
+       if (msg->len > M_TX_RX_FIFO_SIZE - 1) {
+               dev_err(iproc_i2c->device,
+                       "only support data length up to %u bytes\n",
+                       M_TX_RX_FIFO_SIZE - 1);
+               return -EOPNOTSUPP;
+       }
+
+       /* check if bus is busy */
+       if (!!(readl(iproc_i2c->base + M_CMD_OFFSET) &
+              BIT(M_CMD_START_BUSY_SHIFT))) {
+               dev_warn(iproc_i2c->device, "bus is busy\n");
+               return -EBUSY;
+       }
+
+       /* format and load slave address into the TX FIFO */
+       addr = msg->addr << 1 | (msg->flags & I2C_M_RD ? 1 : 0);
+       writel(addr, iproc_i2c->base + M_TX_OFFSET);
+
+       /* for a write transaction, load data into the TX FIFO */
+       if (!(msg->flags & I2C_M_RD)) {
+               for (i = 0; i < msg->len; i++) {
+                       val = msg->buf[i];
+
+                       /* mark the last byte */
+                       if (i == msg->len - 1)
+                               val |= 1 << M_TX_WR_STATUS_SHIFT;
+
+                       writel(val, iproc_i2c->base + M_TX_OFFSET);
+               }
+       }
+
+       /* mark as incomplete before starting the transaction */
+       reinit_completion(&iproc_i2c->done);
+       iproc_i2c->xfer_is_done = 0;
+
+       /*
+        * Enable the "start busy" interrupt, which will be triggered after the
+        * transaction is done, i.e., the internal start_busy bit transitions
+        * from 1 to 0.
+        */
+       writel(1 << IE_M_START_BUSY_SHIFT, iproc_i2c->base + IE_OFFSET);
+
+       /*
+        * Now we can activate the transfer. For a read operation, specify the
+        * number of bytes to read
+        */
+       val = 1 << M_CMD_START_BUSY_SHIFT;
+       if (msg->flags & I2C_M_RD) {
+               val |= (M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) |
+                      (msg->len << M_CMD_RD_CNT_SHIFT);
+       } else {
+               val |= (M_CMD_PROTOCOL_BLK_WR << M_CMD_PROTOCOL_SHIFT);
+       }
+       writel(val, iproc_i2c->base + M_CMD_OFFSET);
+
+       time_left = wait_for_completion_timeout(&iproc_i2c->done, time_left);
+
+       /* disable all interrupts */
+       writel(0, iproc_i2c->base + IE_OFFSET);
+       /* read it back to flush the write */
+       readl(iproc_i2c->base + IE_OFFSET);
+
+       /* make sure the interrupt handler isn't running */
+       synchronize_irq(iproc_i2c->irq);
+
+       if (!time_left && !iproc_i2c->xfer_is_done) {
+               dev_err(iproc_i2c->device, "transaction timed out\n");
+
+               /* flush FIFOs */
+               val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
+                     (1 << M_FIFO_TX_FLUSH_SHIFT);
+               writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+               return -ETIMEDOUT;
+       }
+
+       ret = bcm_iproc_i2c_check_status(iproc_i2c, msg);
+       if (ret) {
+               /* flush both TX/RX FIFOs */
+               val = (1 << M_FIFO_RX_FLUSH_SHIFT) |
+                     (1 << M_FIFO_TX_FLUSH_SHIFT);
+               writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+               return ret;
+       }
+
+       /*
+        * For a read operation, we now need to load the data from FIFO
+        * into the memory buffer
+        */
+       if (msg->flags & I2C_M_RD) {
+               for (i = 0; i < msg->len; i++) {
+                       msg->buf[i] = (readl(iproc_i2c->base + M_RX_OFFSET) >>
+                                     M_RX_DATA_SHIFT) & M_RX_DATA_MASK;
+               }
+       }
+
+       return 0;
+}
+
+static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter,
+                             struct i2c_msg msgs[], int num)
+{
+       struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(adapter);
+       int ret, i;
+
+       /* go through all messages */
+       for (i = 0; i < num; i++) {
+               ret = bcm_iproc_i2c_xfer_single_msg(iproc_i2c, &msgs[i]);
+               if (ret) {
+                       dev_dbg(iproc_i2c->device, "xfer failed\n");
+                       return ret;
+               }
+       }
+
+       return num;
+}
+
+static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm bcm_iproc_algo = {
+       .master_xfer = bcm_iproc_i2c_xfer,
+       .functionality = bcm_iproc_i2c_functionality,
+};
+
+static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+       unsigned int bus_speed;
+       u32 val;
+       int ret = of_property_read_u32(iproc_i2c->device->of_node,
+                                      "clock-frequency", &bus_speed);
+       if (ret < 0) {
+               dev_info(iproc_i2c->device,
+                       "unable to interpret clock-frequency DT property\n");
+               bus_speed = 100000;
+       }
+
+       if (bus_speed < 100000) {
+               dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n",
+                       bus_speed);
+               dev_err(iproc_i2c->device,
+                       "valid speeds are 100khz and 400khz\n");
+               return -EINVAL;
+       } else if (bus_speed < 400000) {
+               bus_speed = 100000;
+       } else {
+               bus_speed = 400000;
+       }
+
+       val = readl(iproc_i2c->base + TIM_CFG_OFFSET);
+       val &= ~(1 << TIM_CFG_MODE_400_SHIFT);
+       val |= (bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT;
+       writel(val, iproc_i2c->base + TIM_CFG_OFFSET);
+
+       dev_info(iproc_i2c->device, "bus set to %u Hz\n", bus_speed);
+
+       return 0;
+}
+
+static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c)
+{
+       u32 val;
+
+       /* put controller in reset */
+       val = readl(iproc_i2c->base + CFG_OFFSET);
+       val |= 1 << CFG_RESET_SHIFT;
+       val &= ~(1 << CFG_EN_SHIFT);
+       writel(val, iproc_i2c->base + CFG_OFFSET);
+
+       /* wait 100 usec per spec */
+       udelay(100);
+
+       /* bring controller out of reset */
+       val &= ~(1 << CFG_RESET_SHIFT);
+       writel(val, iproc_i2c->base + CFG_OFFSET);
+
+       /* flush TX/RX FIFOs and set RX FIFO threshold to zero */
+       val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT);
+       writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET);
+
+       /* disable all interrupts */
+       writel(0, iproc_i2c->base + IE_OFFSET);
+
+       /* clear all pending interrupts */
+       writel(0xffffffff, iproc_i2c->base + IS_OFFSET);
+
+       return 0;
+}
+
+static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c,
+                                        bool enable)
+{
+       u32 val;
+
+       val = readl(iproc_i2c->base + CFG_OFFSET);
+       if (enable)
+               val |= BIT(CFG_EN_SHIFT);
+       else
+               val &= ~BIT(CFG_EN_SHIFT);
+       writel(val, iproc_i2c->base + CFG_OFFSET);
+}
+
+static int bcm_iproc_i2c_probe(struct platform_device *pdev)
+{
+       int irq, ret = 0;
+       struct bcm_iproc_i2c_dev *iproc_i2c;
+       struct i2c_adapter *adap;
+       struct resource *res;
+
+       iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c),
+                                GFP_KERNEL);
+       if (!iproc_i2c)
+               return -ENOMEM;
+
+       platform_set_drvdata(pdev, iproc_i2c);
+       iproc_i2c->device = &pdev->dev;
+       init_completion(&iproc_i2c->done);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       iproc_i2c->base = devm_ioremap_resource(iproc_i2c->device, res);
+       if (IS_ERR(iproc_i2c->base))
+               return PTR_ERR(iproc_i2c->base);
+
+       ret = bcm_iproc_i2c_init(iproc_i2c);
+       if (ret)
+               return ret;
+
+       ret = bcm_iproc_i2c_cfg_speed(iproc_i2c);
+       if (ret)
+               return ret;
+
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0) {
+               dev_err(iproc_i2c->device, "no irq resource\n");
+               return irq;
+       }
+       iproc_i2c->irq = irq;
+
+       ret = devm_request_irq(iproc_i2c->device, irq, bcm_iproc_i2c_isr, 0,
+                              pdev->name, iproc_i2c);
+       if (ret < 0) {
+               dev_err(iproc_i2c->device, "unable to request irq %i\n", irq);
+               return ret;
+       }
+
+       bcm_iproc_i2c_enable_disable(iproc_i2c, true);
+
+       adap = &iproc_i2c->adapter;
+       i2c_set_adapdata(adap, iproc_i2c);
+       strlcpy(adap->name, "Broadcom iProc I2C adapter", sizeof(adap->name));
+       adap->algo = &bcm_iproc_algo;
+       adap->dev.parent = &pdev->dev;
+       adap->dev.of_node = pdev->dev.of_node;
+
+       ret = i2c_add_adapter(adap);
+       if (ret) {
+               dev_err(iproc_i2c->device, "failed to add adapter\n");
+               return ret;
+       }
+
+       return 0;
+}
+
+static int bcm_iproc_i2c_remove(struct platform_device *pdev)
+{
+       struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev);
+
+       /* make sure there's no pending interrupt when we remove the adapter */
+       writel(0, iproc_i2c->base + IE_OFFSET);
+       readl(iproc_i2c->base + IE_OFFSET);
+       synchronize_irq(iproc_i2c->irq);
+
+       i2c_del_adapter(&iproc_i2c->adapter);
+       bcm_iproc_i2c_enable_disable(iproc_i2c, false);
+
+       return 0;
+}
+
+static const struct of_device_id bcm_iproc_i2c_of_match[] = {
+       { .compatible = "brcm,iproc-i2c" },
+       { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm_iproc_i2c_of_match);
+
+static struct platform_driver bcm_iproc_i2c_driver = {
+       .driver = {
+               .name = "bcm-iproc-i2c",
+               .of_match_table = bcm_iproc_i2c_of_match,
+       },
+       .probe = bcm_iproc_i2c_probe,
+       .remove = bcm_iproc_i2c_remove,
+};
+module_platform_driver(bcm_iproc_i2c_driver);
+
+MODULE_AUTHOR("Ray Jui <rjui@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom iProc I2C Driver");
+MODULE_LICENSE("GPL v2");
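
The transfer path in the new driver composes two values from the register fields defined at the top of the file: the address byte pushed into the TX FIFO and the M_CMD word that starts a block read. A standalone sketch of that composition, reusing the same field definitions (the helper names are only for illustration):

#include <stdbool.h>
#include <stdint.h>

#define M_CMD_START_BUSY_SHIFT  31
#define M_CMD_PROTOCOL_SHIFT    9
#define M_CMD_PROTOCOL_BLK_RD   0x8
#define M_CMD_RD_CNT_SHIFT      0

/* 7-bit slave address shifted left one bit, R/W flag in bit 0 (1 = read) */
static uint8_t addr_byte(uint8_t addr7, bool read)
{
	return (uint8_t)(addr7 << 1) | (read ? 1 : 0);
}

/* Command word for a block read of len bytes: set start_busy, select the
 * block-read protocol and put the read count in the low byte.
 */
static uint32_t blk_rd_cmd(uint8_t len)
{
	return (1u << M_CMD_START_BUSY_SHIFT) |
	       ((uint32_t)M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) |
	       ((uint32_t)len << M_CMD_RD_CNT_SHIFT);
}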
index 626f74ecd4be4ad40a0b45b2c6730c0afd4b0528..7d7a14cdadfb187d5fd4f95918146c8c6555443a 100644 (file)
  * @suspended:         Flag holding the device's PM status
  * @send_count:                Number of bytes still expected to send
  * @recv_count:                Number of bytes still expected to receive
+ * @curr_recv_count:   Number of bytes to be received in current transfer
  * @irq:               IRQ number
  * @input_clk:         Input clock to I2C controller
  * @i2c_clk:           Maximum I2C clock speed
@@ -146,6 +147,7 @@ struct cdns_i2c {
        u8 suspended;
        unsigned int send_count;
        unsigned int recv_count;
+       unsigned int curr_recv_count;
        int irq;
        unsigned long input_clk;
        unsigned int i2c_clk;
@@ -182,14 +184,15 @@ static void cdns_i2c_clear_bus_hold(struct cdns_i2c *id)
  */
 static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
 {
-       unsigned int isr_status, avail_bytes;
-       unsigned int bytes_to_recv, bytes_to_send;
+       unsigned int isr_status, avail_bytes, updatetx;
+       unsigned int bytes_to_send;
        struct cdns_i2c *id = ptr;
        /* Signal completion only after everything is updated */
        int done_flag = 0;
        irqreturn_t status = IRQ_NONE;
 
        isr_status = cdns_i2c_readreg(CDNS_I2C_ISR_OFFSET);
+       cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
 
        /* Handling nack and arbitration lost interrupt */
        if (isr_status & (CDNS_I2C_IXR_NACK | CDNS_I2C_IXR_ARB_LOST)) {
@@ -197,89 +200,112 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
                status = IRQ_HANDLED;
        }
 
-       /* Handling Data interrupt */
-       if ((isr_status & CDNS_I2C_IXR_DATA) &&
-                       (id->recv_count >= CDNS_I2C_DATA_INTR_DEPTH)) {
-               /* Always read data interrupt threshold bytes */
-               bytes_to_recv = CDNS_I2C_DATA_INTR_DEPTH;
-               id->recv_count -= CDNS_I2C_DATA_INTR_DEPTH;
-               avail_bytes = cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
-
-               /*
-                * if the tranfer size register value is zero, then
-                * check for the remaining bytes and update the
-                * transfer size register.
-                */
-               if (!avail_bytes) {
-                       if (id->recv_count > CDNS_I2C_TRANSFER_SIZE)
-                               cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
-                                               CDNS_I2C_XFER_SIZE_OFFSET);
-                       else
-                               cdns_i2c_writereg(id->recv_count,
-                                               CDNS_I2C_XFER_SIZE_OFFSET);
-               }
+       /*
+        * Check if transfer size register needs to be updated again for a
+        * large data receive operation.
+        */
+       updatetx = 0;
+       if (id->recv_count > id->curr_recv_count)
+               updatetx = 1;
+
+       /* When receiving, handle data interrupt and completion interrupt */
+       if (id->p_recv_buf &&
+           ((isr_status & CDNS_I2C_IXR_COMP) ||
+            (isr_status & CDNS_I2C_IXR_DATA))) {
+               /* Read data if receive data valid is set */
+               while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
+                      CDNS_I2C_SR_RXDV) {
+                       /*
+                        * Clear hold bit that was set for FIFO control if
+                        * RX data left is less than FIFO depth, unless
+                        * repeated start is selected.
+                        */
+                       if ((id->recv_count < CDNS_I2C_FIFO_DEPTH) &&
+                           !id->bus_hold_flag)
+                               cdns_i2c_clear_bus_hold(id);
 
-               /* Process the data received */
-               while (bytes_to_recv--)
                        *(id->p_recv_buf)++ =
                                cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
+                       id->recv_count--;
+                       id->curr_recv_count--;
 
-               if (!id->bus_hold_flag &&
-                               (id->recv_count <= CDNS_I2C_FIFO_DEPTH))
-                       cdns_i2c_clear_bus_hold(id);
+                       if (updatetx &&
+                           (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1))
+                               break;
+               }
 
-               status = IRQ_HANDLED;
-       }
+               /*
+                * The controller sends NACK to the slave when transfer size
+                * register reaches zero without considering the HOLD bit.
+                * This workaround is implemented for large data transfers to
+                * maintain transfer size non-zero while performing a large
+                * receive operation.
+                */
+               if (updatetx &&
+                   (id->curr_recv_count == CDNS_I2C_FIFO_DEPTH + 1)) {
+                       /* wait while fifo is full */
+                       while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) !=
+                              (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH))
+                               ;
 
-       /* Handling Transfer Complete interrupt */
-       if (isr_status & CDNS_I2C_IXR_COMP) {
-               if (!id->p_recv_buf) {
                        /*
-                        * If the device is sending data If there is further
-                        * data to be sent. Calculate the available space
-                        * in FIFO and fill the FIFO with that many bytes.
+                        * Check number of bytes to be received against maximum
+                        * transfer size and update register accordingly.
                         */
-                       if (id->send_count) {
-                               avail_bytes = CDNS_I2C_FIFO_DEPTH -
-                                   cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
-                               if (id->send_count > avail_bytes)
-                                       bytes_to_send = avail_bytes;
-                               else
-                                       bytes_to_send = id->send_count;
-
-                               while (bytes_to_send--) {
-                                       cdns_i2c_writereg(
-                                               (*(id->p_send_buf)++),
-                                                CDNS_I2C_DATA_OFFSET);
-                                       id->send_count--;
-                               }
+                       if (((int)(id->recv_count) - CDNS_I2C_FIFO_DEPTH) >
+                           CDNS_I2C_TRANSFER_SIZE) {
+                               cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
+                                                 CDNS_I2C_XFER_SIZE_OFFSET);
+                               id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE +
+                                                     CDNS_I2C_FIFO_DEPTH;
                        } else {
-                               /*
-                                * Signal the completion of transaction and
-                                * clear the hold bus bit if there are no
-                                * further messages to be processed.
-                                */
-                               done_flag = 1;
+                               cdns_i2c_writereg(id->recv_count -
+                                                 CDNS_I2C_FIFO_DEPTH,
+                                                 CDNS_I2C_XFER_SIZE_OFFSET);
+                               id->curr_recv_count = id->recv_count;
                        }
-                       if (!id->send_count && !id->bus_hold_flag)
-                               cdns_i2c_clear_bus_hold(id);
-               } else {
+               }
+
+               /* Clear hold (if not repeated start) and signal completion */
+               if ((isr_status & CDNS_I2C_IXR_COMP) && !id->recv_count) {
                        if (!id->bus_hold_flag)
                                cdns_i2c_clear_bus_hold(id);
+                       done_flag = 1;
+               }
+
+               status = IRQ_HANDLED;
+       }
+
+       /* When sending, handle transfer complete interrupt */
+       if ((isr_status & CDNS_I2C_IXR_COMP) && !id->p_recv_buf) {
+               /*
+                * If there is more data to be sent, calculate the
+                * space available in FIFO and fill with that many bytes.
+                */
+               if (id->send_count) {
+                       avail_bytes = CDNS_I2C_FIFO_DEPTH -
+                           cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET);
+                       if (id->send_count > avail_bytes)
+                               bytes_to_send = avail_bytes;
+                       else
+                               bytes_to_send = id->send_count;
+
+                       while (bytes_to_send--) {
+                               cdns_i2c_writereg(
+                                       (*(id->p_send_buf)++),
+                                        CDNS_I2C_DATA_OFFSET);
+                               id->send_count--;
+                       }
+               } else {
                        /*
-                        * If the device is receiving data, then signal
-                        * the completion of transaction and read the data
-                        * present in the FIFO. Signal the completion of
-                        * transaction.
+                        * Signal the completion of transaction and
+                        * clear the hold bus bit if there are no
+                        * further messages to be processed.
                         */
-                       while (cdns_i2c_readreg(CDNS_I2C_SR_OFFSET) &
-                                       CDNS_I2C_SR_RXDV) {
-                               *(id->p_recv_buf)++ =
-                                       cdns_i2c_readreg(CDNS_I2C_DATA_OFFSET);
-                               id->recv_count--;
-                       }
                        done_flag = 1;
                }
+               if (!id->send_count && !id->bus_hold_flag)
+                       cdns_i2c_clear_bus_hold(id);
 
                status = IRQ_HANDLED;
        }
@@ -289,8 +315,6 @@ static irqreturn_t cdns_i2c_isr(int irq, void *ptr)
        if (id->err_status)
                status = IRQ_HANDLED;
 
-       cdns_i2c_writereg(isr_status, CDNS_I2C_ISR_OFFSET);
-
        if (done_flag)
                complete(&id->xfer_done);
 
@@ -316,6 +340,8 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
        if (id->p_msg->flags & I2C_M_RECV_LEN)
                id->recv_count = I2C_SMBUS_BLOCK_MAX + 1;
 
+       id->curr_recv_count = id->recv_count;
+
        /*
         * Check for the message size against FIFO depth and set the
         * 'hold bus' bit if it is greater than FIFO depth.
@@ -335,11 +361,14 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id)
         * receive if it is less than transfer size and transfer size if
         * it is more. Enable the interrupts.
         */
-       if (id->recv_count > CDNS_I2C_TRANSFER_SIZE)
+       if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) {
                cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE,
                                  CDNS_I2C_XFER_SIZE_OFFSET);
-       else
+               id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE;
+       } else {
                cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET);
+       }
+
        /* Clear the bus hold flag if bytes to receive is less than FIFO size */
        if (!id->bus_hold_flag &&
                ((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) &&
@@ -516,6 +545,20 @@ static int cdns_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
         * processed with a repeated start.
         */
        if (num > 1) {
+               /*
+                * This controller does not give completion interrupt after a
+                * master receive message if HOLD bit is set (repeated start),
+                * resulting in SW timeout. Hence, if a receive message is
+                * followed by any other message, an error is returned
+                * indicating that this sequence is not supported.
+                */
+               for (count = 0; count < num - 1; count++) {
+                       if (msgs[count].flags & I2C_M_RD) {
+                               dev_warn(adap->dev.parent,
+                                        "Can't do repeated start after a receive message\n");
+                               return -EOPNOTSUPP;
+                       }
+               }
                id->bus_hold_flag = 1;
                reg = cdns_i2c_readreg(CDNS_I2C_CR_OFFSET);
                reg |= CDNS_I2C_CR_HOLD;
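
The large-receive workaround above reloads the transfer-size register whenever only FIFO-depth + 1 bytes of the current chunk remain, so the register never reaches zero while more data is still expected. The reload decision reduces to the arithmetic below; CDNS_I2C_FIFO_DEPTH and CDNS_I2C_TRANSFER_SIZE are defined elsewhere in the driver, so the values and helper names used here are assumptions for illustration only:

#define FIFO_DEPTH      16      /* stand-in for CDNS_I2C_FIFO_DEPTH */
#define TRANSFER_SIZE   252     /* stand-in for CDNS_I2C_TRANSFER_SIZE */

struct refill {
	unsigned int xfer_size_reg;     /* value written to XFER_SIZE */
	unsigned int curr_recv_count;   /* bytes covered by this chunk */
};

/* recv_count = bytes still expected by the driver when the reload happens */
static struct refill next_chunk(unsigned int recv_count)
{
	struct refill r;

	if ((int)recv_count - FIFO_DEPTH > TRANSFER_SIZE) {
		r.xfer_size_reg = TRANSFER_SIZE;
		r.curr_recv_count = TRANSFER_SIZE + FIFO_DEPTH;
	} else {
		r.xfer_size_reg = recv_count - FIFO_DEPTH;
		r.curr_recv_count = recv_count;
	}
	return r;
}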
diff --git a/drivers/i2c/busses/i2c-designware-baytrail.c b/drivers/i2c/busses/i2c-designware-baytrail.c
new file mode 100644 (file)
index 0000000..5f1ff4c
--- /dev/null
@@ -0,0 +1,160 @@
+/*
+ * Intel BayTrail PMIC I2C bus semaphore implementation
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <asm/iosf_mbi.h>
+#include "i2c-designware-core.h"
+
+#define SEMAPHORE_TIMEOUT      100
+#define PUNIT_SEMAPHORE                0x7
+
+static unsigned long acquired;
+
+static int get_sem(struct device *dev, u32 *sem)
+{
+       u32 reg_val;
+       int ret;
+
+       ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ, PUNIT_SEMAPHORE,
+                           &reg_val);
+       if (ret) {
+               dev_err(dev, "iosf failed to read punit semaphore\n");
+               return ret;
+       }
+
+       *sem = reg_val & 0x1;
+
+       return 0;
+}
+
+static void reset_semaphore(struct device *dev)
+{
+       u32 data;
+
+       if (iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+                               PUNIT_SEMAPHORE, &data)) {
+               dev_err(dev, "iosf failed to reset punit semaphore during read\n");
+               return;
+       }
+
+       data = data & 0xfffffffe;
+       if (iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+                                PUNIT_SEMAPHORE, data))
+               dev_err(dev, "iosf failed to reset punit semaphore during write\n");
+}
+
+int baytrail_i2c_acquire(struct dw_i2c_dev *dev)
+{
+       u32 sem = 0;
+       int ret;
+       unsigned long start, end;
+
+       if (!dev || !dev->dev)
+               return -ENODEV;
+
+       if (!dev->acquire_lock)
+               return 0;
+
+       /* host driver writes 0x2 to side band semaphore register */
+       ret = iosf_mbi_write(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_WRITE,
+                                PUNIT_SEMAPHORE, 0x2);
+       if (ret) {
+               dev_err(dev->dev, "iosf punit semaphore request failed\n");
+               return ret;
+       }
+
+       /* host driver waits for bit 0 to be set in semaphore register */
+       start = jiffies;
+       end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
+       while (!time_after(jiffies, end)) {
+               ret = get_sem(dev->dev, &sem);
+               if (!ret && sem) {
+                       acquired = jiffies;
+                       dev_dbg(dev->dev, "punit semaphore acquired after %ums\n",
+                               jiffies_to_msecs(jiffies - start));
+                       return 0;
+               }
+
+               usleep_range(1000, 2000);
+       }
+
+       dev_err(dev->dev, "punit semaphore timed out, resetting\n");
+       reset_semaphore(dev->dev);
+
+       ret = iosf_mbi_read(BT_MBI_UNIT_PMC, BT_MBI_BUNIT_READ,
+               PUNIT_SEMAPHORE, &sem);
+       if (ret)
+               dev_err(dev->dev, "iosf failed to read punit semaphore\n");
+       else
+               dev_err(dev->dev, "PUNIT SEM: %d\n", sem);
+
+       WARN_ON(1);
+
+       return -ETIMEDOUT;
+}
+EXPORT_SYMBOL(baytrail_i2c_acquire);
+
+void baytrail_i2c_release(struct dw_i2c_dev *dev)
+{
+       if (!dev || !dev->dev)
+               return;
+
+       if (!dev->acquire_lock)
+               return;
+
+       reset_semaphore(dev->dev);
+       dev_dbg(dev->dev, "punit semaphore held for %ums\n",
+               jiffies_to_msecs(jiffies - acquired));
+}
+EXPORT_SYMBOL(baytrail_i2c_release);
+
+int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev)
+{
+       acpi_status status;
+       unsigned long long shared_host = 0;
+       acpi_handle handle;
+
+       if (!dev || !dev->dev)
+               return 0;
+
+       handle = ACPI_HANDLE(dev->dev);
+       if (!handle)
+               return 0;
+
+       status = acpi_evaluate_integer(handle, "_SEM", NULL, &shared_host);
+
+       if (ACPI_FAILURE(status))
+               return 0;
+
+       if (shared_host) {
+               dev_info(dev->dev, "I2C bus managed by PUNIT\n");
+               dev->acquire_lock = baytrail_i2c_acquire;
+               dev->release_lock = baytrail_i2c_release;
+               dev->pm_runtime_disabled = true;
+       }
+
+       if (!iosf_mbi_available())
+               return -EPROBE_DEFER;
+
+       return 0;
+}
+EXPORT_SYMBOL(i2c_dw_eval_lock_support);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("Baytrail I2C Semaphore driver");
+MODULE_LICENSE("GPL v2");
index 23628b7bfb8d8df208c6e434efb95e887dfad6e6..6e25c010e69037a544ad04f82df46e27e80ca9af 100644 (file)
@@ -170,10 +170,10 @@ u32 dw_readl(struct dw_i2c_dev *dev, int offset)
        u32 value;
 
        if (dev->accessor_flags & ACCESS_16BIT)
-               value = readw(dev->base + offset) |
-                       (readw(dev->base + offset + 2) << 16);
+               value = readw_relaxed(dev->base + offset) |
+                       (readw_relaxed(dev->base + offset + 2) << 16);
        else
-               value = readl(dev->base + offset);
+               value = readl_relaxed(dev->base + offset);
 
        if (dev->accessor_flags & ACCESS_SWAP)
                return swab32(value);
@@ -187,10 +187,10 @@ void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
                b = swab32(b);
 
        if (dev->accessor_flags & ACCESS_16BIT) {
-               writew((u16)b, dev->base + offset);
-               writew((u16)(b >> 16), dev->base + offset + 2);
+               writew_relaxed((u16)b, dev->base + offset);
+               writew_relaxed((u16)(b >> 16), dev->base + offset + 2);
        } else {
-               writel(b, dev->base + offset);
+               writel_relaxed(b, dev->base + offset);
        }
 }
 
@@ -285,6 +285,15 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        u32 hcnt, lcnt;
        u32 reg;
        u32 sda_falling_time, scl_falling_time;
+       int ret;
+
+       if (dev->acquire_lock) {
+               ret = dev->acquire_lock(dev);
+               if (ret) {
+                       dev_err(dev->dev, "couldn't acquire bus ownership\n");
+                       return ret;
+               }
+       }
 
        input_clock_khz = dev->get_clk_rate_khz(dev);
 
@@ -298,6 +307,8 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        } else if (reg != DW_IC_COMP_TYPE_VALUE) {
                dev_err(dev->dev, "Unknown Synopsys component type: "
                        "0x%08x\n", reg);
+               if (dev->release_lock)
+                       dev->release_lock(dev);
                return -ENODEV;
        }
 
@@ -309,40 +320,39 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
        sda_falling_time = dev->sda_falling_time ?: 300; /* ns */
        scl_falling_time = dev->scl_falling_time ?: 300; /* ns */
 
-       /* Standard-mode */
-       hcnt = i2c_dw_scl_hcnt(input_clock_khz,
-                               4000,   /* tHD;STA = tHIGH = 4.0 us */
-                               sda_falling_time,
-                               0,      /* 0: DW default, 1: Ideal */
-                               0);     /* No offset */
-       lcnt = i2c_dw_scl_lcnt(input_clock_khz,
-                               4700,   /* tLOW = 4.7 us */
-                               scl_falling_time,
-                               0);     /* No offset */
-
-       /* Allow platforms to specify the ideal HCNT and LCNT values */
+       /* Set SCL timing parameters for standard-mode */
        if (dev->ss_hcnt && dev->ss_lcnt) {
                hcnt = dev->ss_hcnt;
                lcnt = dev->ss_lcnt;
+       } else {
+               hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+                                       4000,   /* tHD;STA = tHIGH = 4.0 us */
+                                       sda_falling_time,
+                                       0,      /* 0: DW default, 1: Ideal */
+                                       0);     /* No offset */
+               lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+                                       4700,   /* tLOW = 4.7 us */
+                                       scl_falling_time,
+                                       0);     /* No offset */
        }
        dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT);
        dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT);
        dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
 
-       /* Fast-mode */
-       hcnt = i2c_dw_scl_hcnt(input_clock_khz,
-                               600,    /* tHD;STA = tHIGH = 0.6 us */
-                               sda_falling_time,
-                               0,      /* 0: DW default, 1: Ideal */
-                               0);     /* No offset */
-       lcnt = i2c_dw_scl_lcnt(input_clock_khz,
-                               1300,   /* tLOW = 1.3 us */
-                               scl_falling_time,
-                               0);     /* No offset */
-
+       /* Set SCL timing parameters for fast-mode */
        if (dev->fs_hcnt && dev->fs_lcnt) {
                hcnt = dev->fs_hcnt;
                lcnt = dev->fs_lcnt;
+       } else {
+               hcnt = i2c_dw_scl_hcnt(input_clock_khz,
+                                       600,    /* tHD;STA = tHIGH = 0.6 us */
+                                       sda_falling_time,
+                                       0,      /* 0: DW default, 1: Ideal */
+                                       0);     /* No offset */
+               lcnt = i2c_dw_scl_lcnt(input_clock_khz,
+                                       1300,   /* tLOW = 1.3 us */
+                                       scl_falling_time,
+                                       0);     /* No offset */
        }
        dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT);
        dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT);
@@ -364,6 +374,9 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
 
        /* configure the i2c master */
        dw_writel(dev, dev->master_cfg , DW_IC_CON);
+
+       if (dev->release_lock)
+               dev->release_lock(dev);
        return 0;
 }
 EXPORT_SYMBOL_GPL(i2c_dw_init);
@@ -627,6 +640,14 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
        dev->abort_source = 0;
        dev->rx_outstanding = 0;
 
+       if (dev->acquire_lock) {
+               ret = dev->acquire_lock(dev);
+               if (ret) {
+                       dev_err(dev->dev, "couldn't acquire bus ownership\n");
+                       goto done_nolock;
+               }
+       }
+
        ret = i2c_dw_wait_bus_not_busy(dev);
        if (ret < 0)
                goto done;
@@ -672,6 +693,10 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
        ret = -EIO;
 
 done:
+       if (dev->release_lock)
+               dev->release_lock(dev);
+
+done_nolock:
        pm_runtime_mark_last_busy(dev->dev);
        pm_runtime_put_autosuspend(dev->dev);
        mutex_unlock(&dev->lock);
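
With the hunks above, both i2c_dw_init() and i2c_dw_xfer() are bracketed by the optional acquire_lock/release_lock callbacks, so platforms that do not share the bus with firmware install no hooks and pay nothing. The shape of that bracketing, as a standalone sketch (struct and function names are only for illustration):

struct bus_ops {
	int  (*acquire_lock)(void *ctx);
	void (*release_lock)(void *ctx);
};

static int guarded_transfer(const struct bus_ops *ops, void *ctx)
{
	int ret = 0;

	if (ops->acquire_lock) {
		ret = ops->acquire_lock(ctx);
		if (ret)
			return ret;     /* could not get bus ownership */
	}

	/* ... the actual register programming would happen here ... */

	if (ops->release_lock)
		ops->release_lock(ctx);
	return ret;
}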
index 5a410ef17abd40c0ab7c7ece3ab3fa539adf1f95..9630222abf32197f48d580e43a14b049821f570f 100644 (file)
@@ -61,6 +61,9 @@
  * @ss_lcnt: standard speed LCNT value
  * @fs_hcnt: fast speed HCNT value
  * @fs_lcnt: fast speed LCNT value
+ * @acquire_lock: function to acquire a hardware lock on the bus
+ * @release_lock: function to release a hardware lock on the bus
+ * @pm_runtime_disabled: true if pm runtime is disabled
  *
  * HCNT and LCNT parameters can be used if the platform knows more accurate
  * values than the one computed based only on the input clock frequency.
@@ -101,6 +104,9 @@ struct dw_i2c_dev {
        u16                     ss_lcnt;
        u16                     fs_hcnt;
        u16                     fs_lcnt;
+       int                     (*acquire_lock)(struct dw_i2c_dev *dev);
+       void                    (*release_lock)(struct dw_i2c_dev *dev);
+       bool                    pm_runtime_disabled;
 };
 
 #define ACCESS_SWAP            0x00000001
@@ -119,3 +125,9 @@ extern void i2c_dw_disable(struct dw_i2c_dev *dev);
 extern void i2c_dw_clear_int(struct dw_i2c_dev *dev);
 extern void i2c_dw_disable_int(struct dw_i2c_dev *dev);
 extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev);
+
+#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL)
+extern int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev);
+#else
+static inline int i2c_dw_eval_lock_support(struct dw_i2c_dev *dev) { return 0; }
+#endif
index acb40f95db78f512c561f6d4d2b5b67479844811..6643d2dc0b250ddbf022c669db4fd2b4b4f848e7 100644 (file)
@@ -6,7 +6,7 @@
  * Copyright (C) 2006 Texas Instruments.
  * Copyright (C) 2007 MontaVista Software Inc.
  * Copyright (C) 2009 Provigent Ltd.
- * Copyright (C) 2011 Intel corporation.
+ * Copyright (C) 2011, 2015 Intel Corporation.
  *
  * ----------------------------------------------------------------------------
  *
 #define DRIVER_NAME "i2c-designware-pci"
 
 enum dw_pci_ctl_id_t {
-       moorestown_0,
-       moorestown_1,
-       moorestown_2,
-
        medfield_0,
        medfield_1,
        medfield_2,
@@ -101,28 +97,7 @@ static struct dw_scl_sda_cfg hsw_config = {
        .sda_hold = 0x9,
 };
 
-static struct  dw_pci_controller  dw_pci_controllers[] = {
-       [moorestown_0] = {
-               .bus_num     = 0,
-               .bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-               .tx_fifo_depth = 32,
-               .rx_fifo_depth = 32,
-               .clk_khz      = 25000,
-       },
-       [moorestown_1] = {
-               .bus_num     = 1,
-               .bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-               .tx_fifo_depth = 32,
-               .rx_fifo_depth = 32,
-               .clk_khz      = 25000,
-       },
-       [moorestown_2] = {
-               .bus_num     = 2,
-               .bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
-               .tx_fifo_depth = 32,
-               .rx_fifo_depth = 32,
-               .clk_khz      = 25000,
-       },
+static struct dw_pci_controller dw_pci_controllers[] = {
        [medfield_0] = {
                .bus_num     = 0,
                .bus_cfg   = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
@@ -170,7 +145,6 @@ static struct  dw_pci_controller  dw_pci_controllers[] = {
                .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
                .tx_fifo_depth = 32,
                .rx_fifo_depth = 32,
-               .clk_khz = 100000,
                .functionality = I2C_FUNC_10BIT_ADDR,
                .scl_sda_cfg = &byt_config,
        },
@@ -179,7 +153,6 @@ static struct  dw_pci_controller  dw_pci_controllers[] = {
                .bus_cfg = INTEL_MID_STD_CFG | DW_IC_CON_SPEED_FAST,
                .tx_fifo_depth = 32,
                .rx_fifo_depth = 32,
-               .clk_khz = 100000,
                .functionality = I2C_FUNC_10BIT_ADDR,
                .scl_sda_cfg = &hsw_config,
        },
@@ -259,7 +232,7 @@ static int i2c_dw_pci_probe(struct pci_dev *pdev,
        dev->functionality = controller->functionality |
                                DW_DEFAULT_FUNCTIONALITY;
 
-       dev->master_cfg =  controller->bus_cfg;
+       dev->master_cfg = controller->bus_cfg;
        if (controller->scl_sda_cfg) {
                cfg = controller->scl_sda_cfg;
                dev->ss_hcnt = cfg->ss_hcnt;
@@ -325,12 +298,8 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev)
 MODULE_ALIAS("i2c_designware-pci");
 
 static const struct pci_device_id i2_designware_pci_ids[] = {
-       /* Moorestown */
-       { PCI_VDEVICE(INTEL, 0x0802), moorestown_0 },
-       { PCI_VDEVICE(INTEL, 0x0803), moorestown_1 },
-       { PCI_VDEVICE(INTEL, 0x0804), moorestown_2 },
        /* Medfield */
-       { PCI_VDEVICE(INTEL, 0x0817), medfield_3,},
+       { PCI_VDEVICE(INTEL, 0x0817), medfield_3 },
        { PCI_VDEVICE(INTEL, 0x0818), medfield_4 },
        { PCI_VDEVICE(INTEL, 0x0819), medfield_5 },
        { PCI_VDEVICE(INTEL, 0x082C), medfield_0 },
@@ -348,7 +317,7 @@ static const struct pci_device_id i2_designware_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x9c61), haswell },
        { PCI_VDEVICE(INTEL, 0x9c62), haswell },
        /* Braswell / Cherrytrail */
-       { PCI_VDEVICE(INTEL, 0x22C1), baytrail,},
+       { PCI_VDEVICE(INTEL, 0x22C1), baytrail },
        { PCI_VDEVICE(INTEL, 0x22C2), baytrail },
        { PCI_VDEVICE(INTEL, 0x22C3), baytrail },
        { PCI_VDEVICE(INTEL, 0x22C4), baytrail },
index 2b463c313e4e03305282b76c5fdefa8f90a48874..c270f5f9a8f9af3d3712bbd0f99874708875aa18 100644 (file)
@@ -195,6 +195,10 @@ static int dw_i2c_probe(struct platform_device *pdev)
                        clk_freq = pdata->i2c_scl_freq;
        }
 
+       r = i2c_dw_eval_lock_support(dev);
+       if (r)
+               return r;
+
        dev->functionality =
                I2C_FUNC_I2C |
                I2C_FUNC_10BIT_ADDR |
@@ -257,10 +261,14 @@ static int dw_i2c_probe(struct platform_device *pdev)
                return r;
        }
 
-       pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
-       pm_runtime_use_autosuspend(&pdev->dev);
-       pm_runtime_set_active(&pdev->dev);
-       pm_runtime_enable(&pdev->dev);
+       if (dev->pm_runtime_disabled) {
+               pm_runtime_forbid(&pdev->dev);
+       } else {
+               pm_runtime_set_autosuspend_delay(&pdev->dev, 1000);
+               pm_runtime_use_autosuspend(&pdev->dev);
+               pm_runtime_set_active(&pdev->dev);
+               pm_runtime_enable(&pdev->dev);
+       }
 
        return 0;
 }
@@ -310,7 +318,9 @@ static int dw_i2c_resume(struct device *dev)
        struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
 
        clk_prepare_enable(i_dev->clk);
-       i2c_dw_init(i_dev);
+
+       if (!i_dev->pm_runtime_disabled)
+               i2c_dw_init(i_dev);
 
        return 0;
 }
index 7f3a9fe9bf4e729a2c446ba905a117a325f17621..d7b26fc6f432005bb02a2394fd4bfb6720271ab1 100644 (file)
@@ -201,7 +201,7 @@ struct imx_i2c_struct {
        void __iomem            *base;
        wait_queue_head_t       queue;
        unsigned long           i2csr;
-       unsigned int            disable_delay;
+       unsigned int            disable_delay;
        int                     stopped;
        unsigned int            ifdr; /* IMX_I2C_IFDR */
        unsigned int            cur_clk;
@@ -295,7 +295,6 @@ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
        dma->chan_tx = dma_request_slave_channel(dev, "tx");
        if (!dma->chan_tx) {
                dev_dbg(dev, "can't request DMA tx channel\n");
-               ret = -ENODEV;
                goto fail_al;
        }
 
@@ -313,7 +312,6 @@ static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx,
        dma->chan_rx = dma_request_slave_channel(dev, "rx");
        if (!dma->chan_rx) {
                dev_dbg(dev, "can't request DMA rx channel\n");
-               ret = -ENODEV;
                goto fail_tx;
        }
 
@@ -481,8 +479,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx)
        i2c_clk_rate = clk_get_rate(i2c_imx->clk);
        if (i2c_imx->cur_clk == i2c_clk_rate)
                return;
-       else
-               i2c_imx->cur_clk = i2c_clk_rate;
+
+       i2c_imx->cur_clk = i2c_clk_rate;
 
        div = (i2c_clk_rate + i2c_imx->bitrate - 1) / i2c_imx->bitrate;
        if (div < i2c_clk_div[0].div)
@@ -490,7 +488,8 @@ static void i2c_imx_set_clk(struct imx_i2c_struct *i2c_imx)
        else if (div > i2c_clk_div[i2c_imx->hwdata->ndivs - 1].div)
                i = i2c_imx->hwdata->ndivs - 1;
        else
-               for (i = 0; i2c_clk_div[i].div < div; i++);
+               for (i = 0; i2c_clk_div[i].div < div; i++)
+                       ;
 
        /* Store divider value */
        i2c_imx->ifdr = i2c_clk_div[i].val;
@@ -628,9 +627,9 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
        result = wait_for_completion_timeout(
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
-       if (result <= 0) {
+       if (result == 0) {
                dmaengine_terminate_all(dma->chan_using);
-               return result ?: -ETIMEDOUT;
+               return -ETIMEDOUT;
        }
 
        /* Waiting for transfer complete. */
@@ -686,9 +685,9 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
        result = wait_for_completion_timeout(
                                &i2c_imx->dma->cmd_complete,
                                msecs_to_jiffies(DMA_TIMEOUT));
-       if (result <= 0) {
+       if (result == 0) {
                dmaengine_terminate_all(dma->chan_using);
-               return result ?: -ETIMEDOUT;
+               return -ETIMEDOUT;
        }
 
        /* waiting for transfer complete. */
@@ -822,6 +821,7 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo
        /* read data */
        for (i = 0; i < msgs->len; i++) {
                u8 len = 0;
+
                result = i2c_imx_trx_complete(i2c_imx);
                if (result)
                        return result;
@@ -917,15 +917,16 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter,
                /* write/read data */
 #ifdef CONFIG_I2C_DEBUG_BUS
                temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR);
-               dev_dbg(&i2c_imx->adapter.dev, "<%s> CONTROL: IEN=%d, IIEN=%d, "
-                       "MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n", __func__,
+               dev_dbg(&i2c_imx->adapter.dev,
+                       "<%s> CONTROL: IEN=%d, IIEN=%d, MSTA=%d, MTX=%d, TXAK=%d, RSTA=%d\n",
+                       __func__,
                        (temp & I2CR_IEN ? 1 : 0), (temp & I2CR_IIEN ? 1 : 0),
                        (temp & I2CR_MSTA ? 1 : 0), (temp & I2CR_MTX ? 1 : 0),
                        (temp & I2CR_TXAK ? 1 : 0), (temp & I2CR_RSTA ? 1 : 0));
                temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR);
                dev_dbg(&i2c_imx->adapter.dev,
-                       "<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, "
-                       "IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n", __func__,
+                       "<%s> STATUS: ICF=%d, IAAS=%d, IBB=%d, IAL=%d, SRW=%d, IIF=%d, RXAK=%d\n",
+                       __func__,
                        (temp & I2SR_ICF ? 1 : 0), (temp & I2SR_IAAS ? 1 : 0),
                        (temp & I2SR_IBB ? 1 : 0), (temp & I2SR_IAL ? 1 : 0),
                        (temp & I2SR_SRW ? 1 : 0), (temp & I2SR_IIF ? 1 : 0),
@@ -1004,7 +1005,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
        i2c_imx->adapter.owner          = THIS_MODULE;
        i2c_imx->adapter.algo           = &i2c_imx_algo;
        i2c_imx->adapter.dev.parent     = &pdev->dev;
-       i2c_imx->adapter.nr             = pdev->id;
+       i2c_imx->adapter.nr             = pdev->id;
        i2c_imx->adapter.dev.of_node    = pdev->dev.of_node;
        i2c_imx->base                   = base;
 
@@ -1063,7 +1064,7 @@ static int i2c_imx_probe(struct platform_device *pdev)
                i2c_imx->adapter.name);
        dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n");
 
-       /* Init DMA config if support*/
+       /* Init DMA config if supported */
        i2c_imx_dma_request(i2c_imx, phy_addr);
 
        return 0;   /* Return OK */
index 7249b5b1e5d091bbd9d906d4bc16cd89fbd36d82..abf5db7e441ebab65fc7c8ad99b5f9bca6218b15 100644 (file)
@@ -12,6 +12,7 @@
  * kind, whether express or implied.
  */
 
+#include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -35,7 +36,9 @@ struct ocores_i2c {
        int pos;
        int nmsgs;
        int state; /* see STATE_ */
-       int clock_khz;
+       struct clk *clk;
+       int ip_clock_khz;
+       int bus_clock_khz;
        void (*setreg)(struct ocores_i2c *i2c, int reg, u8 value);
        u8 (*getreg)(struct ocores_i2c *i2c, int reg);
 };
@@ -215,21 +218,34 @@ static int ocores_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
                return -ETIMEDOUT;
 }
 
-static void ocores_init(struct ocores_i2c *i2c)
+static int ocores_init(struct device *dev, struct ocores_i2c *i2c)
 {
        int prescale;
+       int diff;
        u8 ctrl = oc_getreg(i2c, OCI2C_CONTROL);
 
        /* make sure the device is disabled */
        oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN));
 
-       prescale = (i2c->clock_khz / (5*100)) - 1;
+       prescale = (i2c->ip_clock_khz / (5 * i2c->bus_clock_khz)) - 1;
+       prescale = clamp(prescale, 0, 0xffff);
+
+       diff = i2c->ip_clock_khz / (5 * (prescale + 1)) - i2c->bus_clock_khz;
+       if (abs(diff) > i2c->bus_clock_khz / 10) {
+               dev_err(dev,
+                       "Unsupported clock settings: core: %d KHz, bus: %d KHz\n",
+                       i2c->ip_clock_khz, i2c->bus_clock_khz);
+               return -EINVAL;
+       }
+
        oc_setreg(i2c, OCI2C_PRELOW, prescale & 0xff);
        oc_setreg(i2c, OCI2C_PREHIGH, prescale >> 8);
 
        /* Init the device */
        oc_setreg(i2c, OCI2C_CMD, OCI2C_CMD_IACK);
        oc_setreg(i2c, OCI2C_CONTROL, ctrl | OCI2C_CTRL_IEN | OCI2C_CTRL_EN);
+
+       return 0;
 }
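A worked example of the new prescale sanity check, with assumed numbers that are not taken from the patch: a 20 MHz IP clock driving the default 100 kHz bus divides down exactly, so the 10% tolerance test passes.

    /* assumed values: ip_clock_khz = 20000, bus_clock_khz = 100 */
    int prescale = (20000 / (5 * 100)) - 1;         /* = 39 */
    int achieved = 20000 / (5 * (prescale + 1));    /* = 100 kHz actually generated */
    int diff     = achieved - 100;                  /* = 0, well inside the 10 kHz (10%) limit */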
 
 
@@ -304,6 +320,8 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
        struct device_node *np = pdev->dev.of_node;
        const struct of_device_id *match;
        u32 val;
+       u32 clock_frequency;
+       bool clock_frequency_present;
 
        if (of_property_read_u32(np, "reg-shift", &i2c->reg_shift)) {
                /* no 'reg-shift', check for deprecated 'regstep' */
@@ -319,12 +337,42 @@ static int ocores_i2c_of_probe(struct platform_device *pdev,
                }
        }
 
-       if (of_property_read_u32(np, "clock-frequency", &val)) {
-               dev_err(&pdev->dev,
-                       "Missing required parameter 'clock-frequency'\n");
-               return -ENODEV;
+       clock_frequency_present = !of_property_read_u32(np, "clock-frequency",
+                                                       &clock_frequency);
+       i2c->bus_clock_khz = 100;
+
+       i2c->clk = devm_clk_get(&pdev->dev, NULL);
+
+       if (!IS_ERR(i2c->clk)) {
+               int ret = clk_prepare_enable(i2c->clk);
+
+               if (ret) {
+                       dev_err(&pdev->dev,
+                               "clk_prepare_enable failed: %d\n", ret);
+                       return ret;
+               }
+               i2c->ip_clock_khz = clk_get_rate(i2c->clk) / 1000;
+               if (clock_frequency_present)
+                       i2c->bus_clock_khz = clock_frequency / 1000;
+       }
+
+       if (i2c->ip_clock_khz == 0) {
+               if (of_property_read_u32(np, "opencores,ip-clock-frequency",
+                                               &val)) {
+                       if (!clock_frequency_present) {
+                               dev_err(&pdev->dev,
+                                       "Missing required parameter 'opencores,ip-clock-frequency'\n");
+                               return -ENODEV;
+                       }
+                       i2c->ip_clock_khz = clock_frequency / 1000;
+                       dev_warn(&pdev->dev,
+                                "Deprecated usage of the 'clock-frequency' property, please update to 'opencores,ip-clock-frequency'\n");
+               } else {
+                       i2c->ip_clock_khz = val / 1000;
+                       if (clock_frequency_present)
+                               i2c->bus_clock_khz = clock_frequency / 1000;
+               }
        }
-       i2c->clock_khz = val / 1000;
 
        of_property_read_u32(pdev->dev.of_node, "reg-io-width",
                                &i2c->reg_io_width);
@@ -368,7 +416,8 @@ static int ocores_i2c_probe(struct platform_device *pdev)
        if (pdata) {
                i2c->reg_shift = pdata->reg_shift;
                i2c->reg_io_width = pdata->reg_io_width;
-               i2c->clock_khz = pdata->clock_khz;
+               i2c->ip_clock_khz = pdata->clock_khz;
+               i2c->bus_clock_khz = 100;
        } else {
                ret = ocores_i2c_of_probe(pdev, i2c);
                if (ret)
@@ -402,7 +451,9 @@ static int ocores_i2c_probe(struct platform_device *pdev)
                }
        }
 
-       ocores_init(i2c);
+       ret = ocores_init(&pdev->dev, i2c);
+       if (ret)
+               return ret;
 
        init_waitqueue_head(&i2c->wait);
        ret = devm_request_irq(&pdev->dev, irq, ocores_isr, 0,
@@ -446,6 +497,9 @@ static int ocores_i2c_remove(struct platform_device *pdev)
        /* remove adapter & data */
        i2c_del_adapter(&i2c->adap);
 
+       if (!IS_ERR(i2c->clk))
+               clk_disable_unprepare(i2c->clk);
+
        return 0;
 }
 
@@ -458,6 +512,8 @@ static int ocores_i2c_suspend(struct device *dev)
        /* make sure the device is disabled */
        oc_setreg(i2c, OCI2C_CONTROL, ctrl & ~(OCI2C_CTRL_EN|OCI2C_CTRL_IEN));
 
+       if (!IS_ERR(i2c->clk))
+               clk_disable_unprepare(i2c->clk);
        return 0;
 }
 
@@ -465,9 +521,20 @@ static int ocores_i2c_resume(struct device *dev)
 {
        struct ocores_i2c *i2c = dev_get_drvdata(dev);
 
-       ocores_init(i2c);
+       if (!IS_ERR(i2c->clk)) {
+               unsigned long rate;
+               int ret = clk_prepare_enable(i2c->clk);
 
-       return 0;
+               if (ret) {
+                       dev_err(dev,
+                               "clk_prepare_enable failed: %d\n", ret);
+                       return ret;
+               }
+               rate = clk_get_rate(i2c->clk) / 1000;
+               if (rate)
+                       i2c->ip_clock_khz = rate;
+       }
+       return ocores_init(dev, i2c);
 }
 
 static SIMPLE_DEV_PM_OPS(ocores_i2c_pm, ocores_i2c_suspend, ocores_i2c_resume);
index 44f03eed00dd4f36ae655df26315699556a576a8..d37d9db6681e7b5745a45331ae60737a9b36f99a 100644 (file)
@@ -148,13 +148,6 @@ static inline u32 pmcmsptwi_clock_to_reg(
        return ((clock->filter & 0xf) << 12) | (clock->clock & 0x03ff);
 }
 
-static inline void pmcmsptwi_reg_to_clock(
-                       u32 reg, struct pmcmsptwi_clock *clock)
-{
-       clock->filter = (reg >> 12) & 0xf;
-       clock->clock = reg & 0x03ff;
-}
-
 static inline u32 pmcmsptwi_cfg_to_reg(const struct pmcmsptwi_cfg *cfg)
 {
        return ((cfg->arbf & 0xf) << 12) |
index 92462843db663d09b1106468603b1b9eb65243d2..5f96b1b3e3a5a30e2163098e4afe94fc4a06deeb 100644 (file)
@@ -102,6 +102,9 @@ struct rk3x_i2c {
 
        /* Settings */
        unsigned int scl_frequency;
+       unsigned int scl_rise_ns;
+       unsigned int scl_fall_ns;
+       unsigned int sda_fall_ns;
 
        /* Synchronization & notification */
        spinlock_t lock;
@@ -435,6 +438,9 @@ out:
  *
  * @clk_rate: I2C input clock rate
  * @scl_rate: Desired SCL rate
+ * @scl_rise_ns: How many ns it takes for SCL to rise.
+ * @scl_fall_ns: How many ns it takes for SCL to fall.
+ * @sda_fall_ns: How many ns it takes for SDA to fall.
  * @div_low: Divider output for low
  * @div_high: Divider output for high
  *
@@ -443,11 +449,16 @@ out:
  * too high, we silently use the highest possible rate.
  */
 static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
+                             unsigned long scl_rise_ns,
+                             unsigned long scl_fall_ns,
+                             unsigned long sda_fall_ns,
                              unsigned long *div_low, unsigned long *div_high)
 {
-       unsigned long min_low_ns, min_high_ns;
-       unsigned long max_data_hold_ns;
+       unsigned long spec_min_low_ns, spec_min_high_ns;
+       unsigned long spec_setup_start, spec_max_data_hold_ns;
        unsigned long data_hold_buffer_ns;
+
+       unsigned long min_low_ns, min_high_ns;
        unsigned long max_low_ns, min_total_ns;
 
        unsigned long clk_rate_khz, scl_rate_khz;
@@ -469,29 +480,50 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
                scl_rate = 1000;
 
        /*
-        * min_low_ns:  The minimum number of ns we need to hold low
-        *              to meet i2c spec
-        * min_high_ns: The minimum number of ns we need to hold high
-        *              to meet i2c spec
-        * max_low_ns:  The maximum number of ns we can hold low
-        *              to meet i2c spec
+        * min_low_ns:  The minimum number of ns we need to hold low to
+        *              meet I2C specification, should include fall time.
+        * min_high_ns: The minimum number of ns we need to hold high to
+        *              meet I2C specification, should include rise time.
+        * max_low_ns:  The maximum number of ns we can hold low to meet
+        *              I2C specification.
         *
-        * Note: max_low_ns should be (max data hold time * 2 - buffer)
+        * Note: max_low_ns should be (maximum data hold time * 2 - buffer)
         *       This is because the i2c host on Rockchip holds the data line
         *       for half the low time.
         */
        if (scl_rate <= 100000) {
-               min_low_ns = 4700;
-               min_high_ns = 4000;
-               max_data_hold_ns = 3450;
+               /* Standard-mode */
+               spec_min_low_ns = 4700;
+               spec_setup_start = 4700;
+               spec_min_high_ns = 4000;
+               spec_max_data_hold_ns = 3450;
                data_hold_buffer_ns = 50;
        } else {
-               min_low_ns = 1300;
-               min_high_ns = 600;
-               max_data_hold_ns = 900;
+               /* Fast-mode */
+               spec_min_low_ns = 1300;
+               spec_setup_start = 600;
+               spec_min_high_ns = 600;
+               spec_max_data_hold_ns = 900;
                data_hold_buffer_ns = 50;
        }
-       max_low_ns = max_data_hold_ns * 2 - data_hold_buffer_ns;
+       min_high_ns = scl_rise_ns + spec_min_high_ns;
+
+       /*
+        * Timings for repeated start:
+        * - controller appears to drop SDA at .875x (7/8) programmed clk high.
+        * - controller appears to keep SCL high for 2x programmed clk high.
+        *
+        * We need to account for those rules in picking our "high" time so
+        * we meet tSU;STA and tHD;STA times.
+        */
+       min_high_ns = max(min_high_ns,
+               DIV_ROUND_UP((scl_rise_ns + spec_setup_start) * 1000, 875));
+       min_high_ns = max(min_high_ns,
+               DIV_ROUND_UP((scl_rise_ns + spec_setup_start +
+                             sda_fall_ns + spec_min_high_ns), 2));
+
+       min_low_ns = scl_fall_ns + spec_min_low_ns;
+       max_low_ns = spec_max_data_hold_ns * 2 - data_hold_buffer_ns;
        min_total_ns = min_low_ns + min_high_ns;
 
        /* Adjust to avoid overflow */
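With the spec-default rise/fall times the driver falls back to (assumed values, not part of the patch: scl_rise_ns = 1000, scl_fall_ns = 300, sda_fall_ns = 300), the formulas above give, for Standard-mode:

    /*
     * min_high_ns (plain)     = 1000 + 4000                                = 5000 ns
     * repeated-start rule 1   = DIV_ROUND_UP((1000 + 4700) * 1000, 875)    = 6515 ns
     * repeated-start rule 2   = DIV_ROUND_UP(1000 + 4700 + 300 + 4000, 2)  = 5000 ns
     *   => min_high_ns        = 6515 ns (largest of the three)
     * min_low_ns              = 300 + 4700                                 = 5000 ns
     * max_low_ns              = 3450 * 2 - 50                              = 6850 ns
     * min_total_ns            = 6515 + 5000                                = 11515 ns
     */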
@@ -510,8 +542,8 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
        min_div_for_hold = (min_low_div + min_high_div);
 
        /*
-        * This is the maximum divider so we don't go over the max.
-        * We don't round up here (we round down) since this is a max.
+        * This is the maximum divider so we don't go over the maximum.
+        * We don't round up here (we round down) since this is a maximum.
         */
        max_low_div = clk_rate_khz * max_low_ns / (8 * 1000000);
 
@@ -544,7 +576,7 @@ static int rk3x_i2c_calc_divs(unsigned long clk_rate, unsigned long scl_rate,
                ideal_low_div = DIV_ROUND_UP(clk_rate_khz * min_low_ns,
                                             scl_rate_khz * 8 * min_total_ns);
 
-               /* Don't allow it to go over the max */
+               /* Don't allow it to go over the maximum */
                if (ideal_low_div > max_low_div)
                        ideal_low_div = max_low_div;
 
@@ -588,9 +620,9 @@ static void rk3x_i2c_adapt_div(struct rk3x_i2c *i2c, unsigned long clk_rate)
        u64 t_low_ns, t_high_ns;
        int ret;
 
-       ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, &div_low,
-                                &div_high);
-
+       ret = rk3x_i2c_calc_divs(clk_rate, i2c->scl_frequency, i2c->scl_rise_ns,
+                                i2c->scl_fall_ns, i2c->sda_fall_ns,
+                                &div_low, &div_high);
        WARN_ONCE(ret != 0, "Could not reach SCL freq %u", i2c->scl_frequency);
 
        clk_enable(i2c->clk);
@@ -633,9 +665,10 @@ static int rk3x_i2c_clk_notifier_cb(struct notifier_block *nb, unsigned long
        switch (event) {
        case PRE_RATE_CHANGE:
                if (rk3x_i2c_calc_divs(ndata->new_rate, i2c->scl_frequency,
-                                     &div_low, &div_high) != 0) {
+                                      i2c->scl_rise_ns, i2c->scl_fall_ns,
+                                      i2c->sda_fall_ns,
+                                      &div_low, &div_high) != 0)
                        return NOTIFY_STOP;
-               }
 
                /* scale up */
                if (ndata->new_rate > ndata->old_rate)
@@ -859,6 +892,24 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
                i2c->scl_frequency = DEFAULT_SCL_RATE;
        }
 
+       /*
+        * Read rise and fall time from device tree. If not available, use
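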
+        * the default maximum timing from the specification.
+        */
+       if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-rising-time-ns",
+                                &i2c->scl_rise_ns)) {
+               if (i2c->scl_frequency <= 100000)
+                       i2c->scl_rise_ns = 1000;
+               else
+                       i2c->scl_rise_ns = 300;
+       }
+       if (of_property_read_u32(pdev->dev.of_node, "i2c-scl-falling-time-ns",
+                                &i2c->scl_fall_ns))
+               i2c->scl_fall_ns = 300;
+       if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns",
+                                &i2c->sda_fall_ns))
+               i2c->sda_fall_ns = i2c->scl_fall_ns;
+
        strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
        i2c->adap.owner = THIS_MODULE;
        i2c->adap.algo = &rk3x_i2c_algorithm;
index 28b87e683503df42c2bc11ee36c95b505e04f884..29f14331dd9d01fcb5d66f1e74602b61da9593cf 100644 (file)
@@ -286,6 +286,7 @@ static int tegra_i2c_empty_rx_fifo(struct tegra_i2c_dev *i2c_dev)
        if (rx_fifo_avail > 0 && buf_remaining > 0) {
                BUG_ON(buf_remaining > 3);
                val = i2c_readl(i2c_dev, I2C_RX_FIFO);
+               val = cpu_to_le32(val);
                memcpy(buf, &val, buf_remaining);
                buf_remaining = 0;
                rx_fifo_avail--;
@@ -344,6 +345,7 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev)
        if (tx_fifo_avail > 0 && buf_remaining > 0) {
                BUG_ON(buf_remaining > 3);
                memcpy(&val, buf, buf_remaining);
+               val = le32_to_cpu(val);
 
                /* Again update before writing to FIFO to make sure isr sees. */
                i2c_dev->msg_buf_remaining = 0;
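The cpu_to_le32()/le32_to_cpu() calls added above are no-ops on little-endian CPUs; they matter on big-endian ones, where a partial memcpy of a FIFO word would otherwise pick up the wrong end of the register. A small illustration with assumed values, not taken from the patch:

    u32 word = 0x0000bbaa;          /* RX FIFO word holding two trailing bytes */
    u32 le   = cpu_to_le32(word);   /* in-memory layout aa bb 00 00 on any CPU */
    u8 buf[2];

    memcpy(buf, &le, 2);            /* buf[0] = 0xaa, buf[1] = 0xbb, LE and BE alike */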
index e9eae57a2b50f77e3d25c4d9fcfa003728464740..210cf4874cb7ea2415df5fb3e1d30ec8065de5d4 100644 (file)
@@ -102,7 +102,7 @@ static int acpi_i2c_add_resource(struct acpi_resource *ares, void *data)
                struct acpi_resource_i2c_serialbus *sb;
 
                sb = &ares->data.i2c_serial_bus;
-               if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
+               if (!info->addr && sb->type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
                        info->addr = sb->slave_address;
                        if (sb->access_mode == ACPI_I2C_10BIT_MODE)
                                info->flags |= I2C_CLIENT_TEN;
@@ -698,101 +698,6 @@ static void i2c_device_shutdown(struct device *dev)
                driver->shutdown(client);
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int i2c_legacy_suspend(struct device *dev, pm_message_t mesg)
-{
-       struct i2c_client *client = i2c_verify_client(dev);
-       struct i2c_driver *driver;
-
-       if (!client || !dev->driver)
-               return 0;
-       driver = to_i2c_driver(dev->driver);
-       if (!driver->suspend)
-               return 0;
-       return driver->suspend(client, mesg);
-}
-
-static int i2c_legacy_resume(struct device *dev)
-{
-       struct i2c_client *client = i2c_verify_client(dev);
-       struct i2c_driver *driver;
-
-       if (!client || !dev->driver)
-               return 0;
-       driver = to_i2c_driver(dev->driver);
-       if (!driver->resume)
-               return 0;
-       return driver->resume(client);
-}
-
-static int i2c_device_pm_suspend(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_suspend(dev);
-       else
-               return i2c_legacy_suspend(dev, PMSG_SUSPEND);
-}
-
-static int i2c_device_pm_resume(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_resume(dev);
-       else
-               return i2c_legacy_resume(dev);
-}
-
-static int i2c_device_pm_freeze(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_freeze(dev);
-       else
-               return i2c_legacy_suspend(dev, PMSG_FREEZE);
-}
-
-static int i2c_device_pm_thaw(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_thaw(dev);
-       else
-               return i2c_legacy_resume(dev);
-}
-
-static int i2c_device_pm_poweroff(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_poweroff(dev);
-       else
-               return i2c_legacy_suspend(dev, PMSG_HIBERNATE);
-}
-
-static int i2c_device_pm_restore(struct device *dev)
-{
-       const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
-
-       if (pm)
-               return pm_generic_restore(dev);
-       else
-               return i2c_legacy_resume(dev);
-}
-#else /* !CONFIG_PM_SLEEP */
-#define i2c_device_pm_suspend  NULL
-#define i2c_device_pm_resume   NULL
-#define i2c_device_pm_freeze   NULL
-#define i2c_device_pm_thaw     NULL
-#define i2c_device_pm_poweroff NULL
-#define i2c_device_pm_restore  NULL
-#endif /* !CONFIG_PM_SLEEP */
-
 static void i2c_client_dev_release(struct device *dev)
 {
        kfree(to_i2c_client(dev));
@@ -804,6 +709,7 @@ show_name(struct device *dev, struct device_attribute *attr, char *buf)
        return sprintf(buf, "%s\n", dev->type == &i2c_client_type ?
                       to_i2c_client(dev)->name : to_i2c_adapter(dev)->name);
 }
+static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 
 static ssize_t
 show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
@@ -817,8 +723,6 @@ show_modalias(struct device *dev, struct device_attribute *attr, char *buf)
 
        return sprintf(buf, "%s%s\n", I2C_MODULE_PREFIX, client->name);
 }
-
-static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 static DEVICE_ATTR(modalias, S_IRUGO, show_modalias, NULL);
 
 static struct attribute *i2c_dev_attrs[] = {
@@ -827,29 +731,7 @@ static struct attribute *i2c_dev_attrs[] = {
        &dev_attr_modalias.attr,
        NULL
 };
-
-static struct attribute_group i2c_dev_attr_group = {
-       .attrs          = i2c_dev_attrs,
-};
-
-static const struct attribute_group *i2c_dev_attr_groups[] = {
-       &i2c_dev_attr_group,
-       NULL
-};
-
-static const struct dev_pm_ops i2c_device_pm_ops = {
-       .suspend = i2c_device_pm_suspend,
-       .resume = i2c_device_pm_resume,
-       .freeze = i2c_device_pm_freeze,
-       .thaw = i2c_device_pm_thaw,
-       .poweroff = i2c_device_pm_poweroff,
-       .restore = i2c_device_pm_restore,
-       SET_RUNTIME_PM_OPS(
-               pm_generic_runtime_suspend,
-               pm_generic_runtime_resume,
-               NULL
-       )
-};
+ATTRIBUTE_GROUPS(i2c_dev);
 
 struct bus_type i2c_bus_type = {
        .name           = "i2c",
@@ -857,12 +739,11 @@ struct bus_type i2c_bus_type = {
        .probe          = i2c_device_probe,
        .remove         = i2c_device_remove,
        .shutdown       = i2c_device_shutdown,
-       .pm             = &i2c_device_pm_ops,
 };
 EXPORT_SYMBOL_GPL(i2c_bus_type);
 
 static struct device_type i2c_client_type = {
-       .groups         = i2c_dev_attr_groups,
+       .groups         = i2c_dev_groups,
        .uevent         = i2c_device_uevent,
        .release        = i2c_client_dev_release,
 };
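ATTRIBUTE_GROUPS(i2c_dev) above (and ATTRIBUTE_GROUPS(i2c_adapter) further down) generates essentially the same attribute_group boilerplate that this patch deletes; roughly, the macro expands to:

    static const struct attribute_group i2c_dev_group = {
            .attrs = i2c_dev_attrs,
    };
    static const struct attribute_group *i2c_dev_groups[] = {
            &i2c_dev_group,
            NULL,
    };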
@@ -1261,6 +1142,7 @@ i2c_sysfs_new_device(struct device *dev, struct device_attribute *attr,
 
        return count;
 }
+static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
 
 /*
  * And of course let the users delete the devices they instantiated, if
@@ -1315,8 +1197,6 @@ i2c_sysfs_delete_device(struct device *dev, struct device_attribute *attr,
                        "delete_device");
        return res;
 }
-
-static DEVICE_ATTR(new_device, S_IWUSR, NULL, i2c_sysfs_new_device);
 static DEVICE_ATTR_IGNORE_LOCKDEP(delete_device, S_IWUSR, NULL,
                                   i2c_sysfs_delete_device);
 
@@ -1326,18 +1206,10 @@ static struct attribute *i2c_adapter_attrs[] = {
        &dev_attr_delete_device.attr,
        NULL
 };
-
-static struct attribute_group i2c_adapter_attr_group = {
-       .attrs          = i2c_adapter_attrs,
-};
-
-static const struct attribute_group *i2c_adapter_attr_groups[] = {
-       &i2c_adapter_attr_group,
-       NULL
-};
+ATTRIBUTE_GROUPS(i2c_adapter);
 
 struct device_type i2c_adapter_type = {
-       .groups         = i2c_adapter_attr_groups,
+       .groups         = i2c_adapter_groups,
        .release        = i2c_adapter_dev_release,
 };
 EXPORT_SYMBOL_GPL(i2c_adapter_type);
@@ -1419,8 +1291,6 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
        if (of_get_property(node, "wakeup-source", NULL))
                info.flags |= I2C_CLIENT_WAKE;
 
-       request_module("%s%s", I2C_MODULE_PREFIX, info.type);
-
        result = i2c_new_device(adap, &info);
        if (result == NULL) {
                dev_err(&adap->dev, "of_i2c: Failure registering %s\n",
@@ -1796,11 +1666,15 @@ void i2c_del_adapter(struct i2c_adapter *adap)
        /* device name is gone after device_unregister */
        dev_dbg(&adap->dev, "adapter [%s] unregistered\n", adap->name);
 
-       /* clean up the sysfs representation */
+       /* wait until all references to the device are gone
+        *
+        * FIXME: This is old code and should ideally be replaced by an
+        * alternative which results in decoupling the lifetime of the struct
+        * device from the i2c_adapter, like spi or netdev do. Any solution
+        * should be thoroughly tested with DEBUG_KOBJECT_RELEASE enabled!
+        */
        init_completion(&adap->dev_released);
        device_unregister(&adap->dev);
-
-       /* wait for sysfs to drop all references */
        wait_for_completion(&adap->dev_released);
 
        /* free bus id */
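The completion used in this handshake is signalled from the adapter's struct device release callback, so wait_for_completion() really does block until the last reference to adap->dev has been dropped. The counterpart elsewhere in i2c-core.c looks roughly like this:

    static void i2c_adapter_dev_release(struct device *dev)
    {
            struct i2c_adapter *adap = to_i2c_adapter(dev);

            complete(&adap->dev_released);  /* last kobject reference is gone */
    }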
@@ -1859,14 +1733,6 @@ int i2c_register_driver(struct module *owner, struct i2c_driver *driver)
        if (res)
                return res;
 
-       /* Drivers should switch to dev_pm_ops instead. */
-       if (driver->suspend)
-               pr_warn("i2c-core: driver [%s] using legacy suspend method\n",
-                       driver->driver.name);
-       if (driver->resume)
-               pr_warn("i2c-core: driver [%s] using legacy resume method\n",
-                       driver->driver.name);
-
        pr_debug("i2c-core: driver [%s] registered\n", driver->driver.name);
 
        INIT_LIST_HEAD(&driver->clients);
index ec11b404b433737657957ef9dd0e559a955b45aa..3d8f4fe2e47e52eefff7da7967fe41bf0fafe801 100644 (file)
@@ -41,6 +41,7 @@
 #include <linux/i2c-mux.h>
 #include <linux/i2c/pca954x.h>
 #include <linux/module.h>
+#include <linux/of.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
 
@@ -186,6 +187,8 @@ static int pca954x_probe(struct i2c_client *client,
 {
        struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent);
        struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev);
+       struct device_node *of_node = client->dev.of_node;
+       bool idle_disconnect_dt;
        struct gpio_desc *gpio;
        int num, force, class;
        struct pca954x *data;
@@ -217,8 +220,13 @@ static int pca954x_probe(struct i2c_client *client,
        data->type = id->driver_data;
        data->last_chan = 0;               /* force the first selection */
 
+       idle_disconnect_dt = of_node &&
+               of_property_read_bool(of_node, "i2c-mux-idle-disconnect");
+
        /* Now create an adapter for each channel */
        for (num = 0; num < chips[data->type].nchans; num++) {
+               bool idle_disconnect_pd = false;
+
                force = 0;                        /* dynamic adap number */
                class = 0;                        /* no class by default */
                if (pdata) {
@@ -229,12 +237,13 @@ static int pca954x_probe(struct i2c_client *client,
                        } else
                                /* discard unconfigured channels */
                                break;
+                       idle_disconnect_pd = pdata->modes[num].deselect_on_exit;
                }
 
                data->virt_adaps[num] =
                        i2c_add_mux_adapter(adap, &client->dev, client,
                                force, num, class, pca954x_select_chan,
-                               (pdata && pdata->modes[num].deselect_on_exit)
+                               (idle_disconnect_pd || idle_disconnect_dt)
                                        ? pca954x_deselect_mux : NULL);
 
                if (data->virt_adaps[num] == NULL) {
index 4132935dc929a5a891b36908efd50978a67f7ea6..4011effe4c05d972959fb8fe9c3db297ee248421 100644 (file)
@@ -21,7 +21,7 @@ config IIO_BUFFER
 if IIO_BUFFER
 
 config IIO_BUFFER_CB
-boolean "IIO callback buffer used for push in-kernel interfaces"
+       bool "IIO callback buffer used for push in-kernel interfaces"
        help
          Should be selected by any drivers that do in-kernel push
          usage.  That is, those where the data is pushed to the consumer.
@@ -43,7 +43,7 @@ config IIO_TRIGGERED_BUFFER
 endif # IIO_BUFFER
 
 config IIO_TRIGGER
-       boolean "Enable triggered sampling support"
+       bool "Enable triggered sampling support"
        help
          Provides IIO core support for triggers.  Currently these
          are used to initialize capture of samples to push into
index 56a4b7ca7ee38ba118796d6d5e3eb770bbebc69a..45d67e9228d75b44d71354d248cf97140dfe37ad 100644 (file)
@@ -1124,6 +1124,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
        if (!optlen)
                return -EINVAL;
 
+       memset(&sa_path, 0, sizeof(sa_path));
+       sa_path.vlan_id = 0xffff;
+
        ib_sa_unpack_path(path_data->path_rec, &sa_path);
        ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1);
        if (ret)
index 6095872549e79fb0dd9c0b3702c4eb01e974fb3f..8b8cc6fa0ab0c1ebf966b2ace4932807d9181bf1 100644 (file)
@@ -294,7 +294,8 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)
        if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
                rbt_ib_umem_insert(&umem->odp_data->interval_tree,
                                   &context->umem_tree);
-       if (likely(!atomic_read(&context->notifier_count)))
+       if (likely(!atomic_read(&context->notifier_count)) ||
+           context->odp_mrs_count == 1)
                umem->odp_data->mn_counters_active = true;
        else
                list_add(&umem->odp_data->no_private_counters,
index 643c08a025a52d015431b8a27be1ddcacbd36845..b716b08156446e186c9ae608f3f4e6343c6f200f 100644 (file)
@@ -258,5 +258,6 @@ IB_UVERBS_DECLARE_CMD(close_xrcd);
 
 IB_UVERBS_DECLARE_EX_CMD(create_flow);
 IB_UVERBS_DECLARE_EX_CMD(destroy_flow);
+IB_UVERBS_DECLARE_EX_CMD(query_device);
 
 #endif /* UVERBS_H */
index b7943ff16ed3f2edece8ec4cc3c7931594bd943a..a9f048990dfcd833de09978c0448ad979749e4c9 100644 (file)
@@ -400,6 +400,52 @@ err:
        return ret;
 }
 
+static void copy_query_dev_fields(struct ib_uverbs_file *file,
+                                 struct ib_uverbs_query_device_resp *resp,
+                                 struct ib_device_attr *attr)
+{
+       resp->fw_ver            = attr->fw_ver;
+       resp->node_guid         = file->device->ib_dev->node_guid;
+       resp->sys_image_guid    = attr->sys_image_guid;
+       resp->max_mr_size       = attr->max_mr_size;
+       resp->page_size_cap     = attr->page_size_cap;
+       resp->vendor_id         = attr->vendor_id;
+       resp->vendor_part_id    = attr->vendor_part_id;
+       resp->hw_ver            = attr->hw_ver;
+       resp->max_qp            = attr->max_qp;
+       resp->max_qp_wr         = attr->max_qp_wr;
+       resp->device_cap_flags  = attr->device_cap_flags;
+       resp->max_sge           = attr->max_sge;
+       resp->max_sge_rd        = attr->max_sge_rd;
+       resp->max_cq            = attr->max_cq;
+       resp->max_cqe           = attr->max_cqe;
+       resp->max_mr            = attr->max_mr;
+       resp->max_pd            = attr->max_pd;
+       resp->max_qp_rd_atom    = attr->max_qp_rd_atom;
+       resp->max_ee_rd_atom    = attr->max_ee_rd_atom;
+       resp->max_res_rd_atom   = attr->max_res_rd_atom;
+       resp->max_qp_init_rd_atom       = attr->max_qp_init_rd_atom;
+       resp->max_ee_init_rd_atom       = attr->max_ee_init_rd_atom;
+       resp->atomic_cap                = attr->atomic_cap;
+       resp->max_ee                    = attr->max_ee;
+       resp->max_rdd                   = attr->max_rdd;
+       resp->max_mw                    = attr->max_mw;
+       resp->max_raw_ipv6_qp           = attr->max_raw_ipv6_qp;
+       resp->max_raw_ethy_qp           = attr->max_raw_ethy_qp;
+       resp->max_mcast_grp             = attr->max_mcast_grp;
+       resp->max_mcast_qp_attach       = attr->max_mcast_qp_attach;
+       resp->max_total_mcast_qp_attach = attr->max_total_mcast_qp_attach;
+       resp->max_ah                    = attr->max_ah;
+       resp->max_fmr                   = attr->max_fmr;
+       resp->max_map_per_fmr           = attr->max_map_per_fmr;
+       resp->max_srq                   = attr->max_srq;
+       resp->max_srq_wr                = attr->max_srq_wr;
+       resp->max_srq_sge               = attr->max_srq_sge;
+       resp->max_pkeys                 = attr->max_pkeys;
+       resp->local_ca_ack_delay        = attr->local_ca_ack_delay;
+       resp->phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
+}
+
 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                               const char __user *buf,
                               int in_len, int out_len)
@@ -420,47 +466,7 @@ ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
                return ret;
 
        memset(&resp, 0, sizeof resp);
-
-       resp.fw_ver                    = attr.fw_ver;
-       resp.node_guid                 = file->device->ib_dev->node_guid;
-       resp.sys_image_guid            = attr.sys_image_guid;
-       resp.max_mr_size               = attr.max_mr_size;
-       resp.page_size_cap             = attr.page_size_cap;
-       resp.vendor_id                 = attr.vendor_id;
-       resp.vendor_part_id            = attr.vendor_part_id;
-       resp.hw_ver                    = attr.hw_ver;
-       resp.max_qp                    = attr.max_qp;
-       resp.max_qp_wr                 = attr.max_qp_wr;
-       resp.device_cap_flags          = attr.device_cap_flags;
-       resp.max_sge                   = attr.max_sge;
-       resp.max_sge_rd                = attr.max_sge_rd;
-       resp.max_cq                    = attr.max_cq;
-       resp.max_cqe                   = attr.max_cqe;
-       resp.max_mr                    = attr.max_mr;
-       resp.max_pd                    = attr.max_pd;
-       resp.max_qp_rd_atom            = attr.max_qp_rd_atom;
-       resp.max_ee_rd_atom            = attr.max_ee_rd_atom;
-       resp.max_res_rd_atom           = attr.max_res_rd_atom;
-       resp.max_qp_init_rd_atom       = attr.max_qp_init_rd_atom;
-       resp.max_ee_init_rd_atom       = attr.max_ee_init_rd_atom;
-       resp.atomic_cap                = attr.atomic_cap;
-       resp.max_ee                    = attr.max_ee;
-       resp.max_rdd                   = attr.max_rdd;
-       resp.max_mw                    = attr.max_mw;
-       resp.max_raw_ipv6_qp           = attr.max_raw_ipv6_qp;
-       resp.max_raw_ethy_qp           = attr.max_raw_ethy_qp;
-       resp.max_mcast_grp             = attr.max_mcast_grp;
-       resp.max_mcast_qp_attach       = attr.max_mcast_qp_attach;
-       resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
-       resp.max_ah                    = attr.max_ah;
-       resp.max_fmr                   = attr.max_fmr;
-       resp.max_map_per_fmr           = attr.max_map_per_fmr;
-       resp.max_srq                   = attr.max_srq;
-       resp.max_srq_wr                = attr.max_srq_wr;
-       resp.max_srq_sge               = attr.max_srq_sge;
-       resp.max_pkeys                 = attr.max_pkeys;
-       resp.local_ca_ack_delay        = attr.local_ca_ack_delay;
-       resp.phys_port_cnt             = file->device->ib_dev->phys_port_cnt;
+       copy_query_dev_fields(file, &resp, &attr);
 
        if (copy_to_user((void __user *) (unsigned long) cmd.response,
                         &resp, sizeof resp))
@@ -2091,20 +2097,21 @@ ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
        if (qp->real_qp == qp) {
                ret = ib_resolve_eth_l2_attrs(qp, attr, &cmd.attr_mask);
                if (ret)
-                       goto out;
+                       goto release_qp;
                ret = qp->device->modify_qp(qp, attr,
                        modify_qp_mask(qp->qp_type, cmd.attr_mask), &udata);
        } else {
                ret = ib_modify_qp(qp, attr, modify_qp_mask(qp->qp_type, cmd.attr_mask));
        }
 
-       put_qp_read(qp);
-
        if (ret)
-               goto out;
+               goto release_qp;
 
        ret = in_len;
 
+release_qp:
+       put_qp_read(qp);
+
 out:
        kfree(attr);
 
@@ -3287,3 +3294,64 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
 
        return ret ? ret : in_len;
 }
+
+int ib_uverbs_ex_query_device(struct ib_uverbs_file *file,
+                             struct ib_udata *ucore,
+                             struct ib_udata *uhw)
+{
+       struct ib_uverbs_ex_query_device_resp resp;
+       struct ib_uverbs_ex_query_device  cmd;
+       struct ib_device_attr attr;
+       struct ib_device *device;
+       int err;
+
+       device = file->device->ib_dev;
+       if (ucore->inlen < sizeof(cmd))
+               return -EINVAL;
+
+       err = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
+       if (err)
+               return err;
+
+       if (cmd.comp_mask)
+               return -EINVAL;
+
+       if (cmd.reserved)
+               return -EINVAL;
+
+       resp.response_length = offsetof(typeof(resp), odp_caps);
+
+       if (ucore->outlen < resp.response_length)
+               return -ENOSPC;
+
+       err = device->query_device(device, &attr);
+       if (err)
+               return err;
+
+       copy_query_dev_fields(file, &resp.base, &attr);
+       resp.comp_mask = 0;
+
+       if (ucore->outlen < resp.response_length + sizeof(resp.odp_caps))
+               goto end;
+
+#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
+       resp.odp_caps.general_caps = attr.odp_caps.general_caps;
+       resp.odp_caps.per_transport_caps.rc_odp_caps =
+               attr.odp_caps.per_transport_caps.rc_odp_caps;
+       resp.odp_caps.per_transport_caps.uc_odp_caps =
+               attr.odp_caps.per_transport_caps.uc_odp_caps;
+       resp.odp_caps.per_transport_caps.ud_odp_caps =
+               attr.odp_caps.per_transport_caps.ud_odp_caps;
+       resp.odp_caps.reserved = 0;
+#else
+       memset(&resp.odp_caps, 0, sizeof(resp.odp_caps));
+#endif
+       resp.response_length += sizeof(resp.odp_caps);
+
+end:
+       err = ib_copy_to_udata(ucore, &resp, resp.response_length);
+       if (err)
+               return err;
+
+       return 0;
+}
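The response_length handling above is the usual pattern for extended uverbs commands: the kernel copies only as many bytes as both sides understand, so an older userspace with a smaller output buffer simply never sees odp_caps. Condensed into a sketch (same fields as above, not a replacement for the real flow):

    resp.response_length = offsetof(typeof(resp), odp_caps);   /* base part only */
    if (ucore->outlen >= resp.response_length + sizeof(resp.odp_caps))
            resp.response_length += sizeof(resp.odp_caps);     /* userspace can take the extra caps */
    err = ib_copy_to_udata(ucore, &resp, resp.response_length);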
index 5db1a8cc388da0c5de517bf69b3d8136b94a1bbf..259dcc7779f5e01bc95b66ca90e64d20f7c94087 100644 (file)
@@ -123,6 +123,7 @@ static int (*uverbs_ex_cmd_table[])(struct ib_uverbs_file *file,
                                    struct ib_udata *uhw) = {
        [IB_USER_VERBS_EX_CMD_CREATE_FLOW]      = ib_uverbs_ex_create_flow,
        [IB_USER_VERBS_EX_CMD_DESTROY_FLOW]     = ib_uverbs_ex_destroy_flow,
+       [IB_USER_VERBS_EX_CMD_QUERY_DEVICE]     = ib_uverbs_ex_query_device,
 };
 
 static void ib_uverbs_add_one(struct ib_device *device);
index 794555dc86a598a78125edc38a01299157e9caeb..bdfac2ccb704ab43403a5245448ef1184143e738 100644 (file)
@@ -225,13 +225,20 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
        struct c4iw_cq *chp;
        unsigned long flag;
 
+       spin_lock_irqsave(&dev->lock, flag);
        chp = get_chp(dev, qid);
        if (chp) {
+               atomic_inc(&chp->refcnt);
+               spin_unlock_irqrestore(&dev->lock, flag);
                t4_clear_cq_armed(&chp->cq);
                spin_lock_irqsave(&chp->comp_handler_lock, flag);
                (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
                spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
-       } else
+               if (atomic_dec_and_test(&chp->refcnt))
+                       wake_up(&chp->wait);
+       } else {
                PDBG("%s unknown cqid 0x%x\n", __func__, qid);
+               spin_unlock_irqrestore(&dev->lock, flag);
+       }
        return 0;
 }
index b5678ac97393ab94ba73b6b1f396ae801801c480..d87e1650f6437835f3660c21d3a59ec920fa8f7c 100644 (file)
@@ -196,7 +196,7 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
        return (int)(rdev->lldi.vr->stag.size >> 5);
 }
 
-#define C4IW_WR_TO (30*HZ)
+#define C4IW_WR_TO (60*HZ)
 
 struct c4iw_wr_wait {
        struct completion completion;
@@ -220,22 +220,21 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
                                 u32 hwtid, u32 qpid,
                                 const char *func)
 {
-       unsigned to = C4IW_WR_TO;
        int ret;
 
-       do {
-               ret = wait_for_completion_timeout(&wr_waitp->completion, to);
-               if (!ret) {
-                       printk(KERN_ERR MOD "%s - Device %s not responding - "
-                              "tid %u qpid %u\n", func,
-                              pci_name(rdev->lldi.pdev), hwtid, qpid);
-                       if (c4iw_fatal_error(rdev)) {
-                               wr_waitp->ret = -EIO;
-                               break;
-                       }
-                       to = to << 2;
-               }
-       } while (!ret);
+       if (c4iw_fatal_error(rdev)) {
+               wr_waitp->ret = -EIO;
+               goto out;
+       }
+
+       ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
+       if (!ret) {
+               PDBG("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
+                    func, pci_name(rdev->lldi.pdev), hwtid, qpid);
+               rdev->flags |= T4_FATAL_ERROR;
+               wr_waitp->ret = -EIO;
+       }
+out:
        if (wr_waitp->ret)
                PDBG("%s: FW reply %d tid %u qpid %u\n",
                     pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);
index 4977082e081f2303542d34ef230c5d199b2b2f31..33c45dfcbd88cb11a05c637e22159cc34008a718 100644 (file)
@@ -277,7 +277,7 @@ static int remove_file(struct dentry *parent, char *name)
        }
 
        spin_lock(&tmp->d_lock);
-       if (!(d_unhashed(tmp) && tmp->d_inode)) {
+       if (!d_unhashed(tmp) && tmp->d_inode) {
                dget_dlock(tmp);
                __d_drop(tmp);
                spin_unlock(&tmp->d_lock);
index 6559af60bffd62fbf162320379ff545ca4c974fc..e08db7020cd4939809dd456882ff3d4e69480480 100644 (file)
@@ -908,9 +908,6 @@ void ipath_chip_cleanup(struct ipath_devdata *);
 /* clean up any chip type-specific stuff */
 void ipath_chip_done(void);
 
-/* check to see if we have to force ordering for write combining */
-int ipath_unordered_wc(void);
-
 void ipath_disarm_piobufs(struct ipath_devdata *, unsigned first,
                          unsigned cnt);
 void ipath_cancel_sends(struct ipath_devdata *, int);
index 1d7bd82a1fb1fa3ffe8c4cebf08a3cd1b57529ce..1a7e20a75149ce6a6bab980b466585784595c0f1 100644 (file)
@@ -47,16 +47,3 @@ int ipath_enable_wc(struct ipath_devdata *dd)
 {
        return 0;
 }
-
-/**
- * ipath_unordered_wc - indicate whether write combining is unordered
- *
- * Because our performance depends on our ability to do write
- * combining mmio writes in the most efficient way, we need to
- * know if we are on a processor that may reorder stores when
- * write combining.
- */
-int ipath_unordered_wc(void)
-{
-       return 1;
-}
index 3428acb0868c202383304105546fc023d3a0917e..4ad0b932df1fab1c1897f144db9ffc8af35c5f73 100644 (file)
@@ -167,18 +167,3 @@ void ipath_disable_wc(struct ipath_devdata *dd)
                dd->ipath_wc_cookie = 0; /* even on failure */
        }
 }
-
-/**
- * ipath_unordered_wc - indicate whether write combining is ordered
- *
- * Because our performance depends on our ability to do write combining mmio
- * writes in the most efficient way, we need to know if we are on an Intel
- * or AMD x86_64 processor.  AMD x86_64 processors flush WC buffers out in
- * the order completed, and so no special flushing is required to get
- * correct ordering.  Intel processors, however, will flush write buffers
- * out in "random" orders, and so explicit ordering is needed at times.
- */
-int ipath_unordered_wc(void)
-{
-       return boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
-}
index 56a593e0ae5d1f537db0f3615ee29eb99b0defc2..39a488889fc7a9981213b25567f051dc772bc510 100644 (file)
@@ -372,7 +372,7 @@ int mlx4_ib_demux_cm_handler(struct ib_device *ibdev, int port, int *slave,
                *slave = mlx4_ib_find_real_gid(ibdev, port, gid.global.interface_id);
                if (*slave < 0) {
                        mlx4_ib_warn(ibdev, "failed matching slave_id by gid (0x%llx)\n",
-                                       gid.global.interface_id);
+                                    be64_to_cpu(gid.global.interface_id));
                        return -ENOENT;
                }
                return 0;
index 543ecdd8667bad824fa3313a5b45be9693a5fc69..0176caa5792c4576276470c2c3f86f0fca16a7bd 100644 (file)
@@ -369,8 +369,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
        int err;
 
        mutex_lock(&cq->resize_mutex);
-
-       if (entries < 1) {
+       if (entries < 1 || entries > dev->dev->caps.max_cqes) {
                err = -EINVAL;
                goto out;
        }
@@ -381,7 +380,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                goto out;
        }
 
-       if (entries > dev->dev->caps.max_cqes) {
+       if (entries > dev->dev->caps.max_cqes + 1) {
                err = -EINVAL;
                goto out;
        }
@@ -394,7 +393,7 @@ int mlx4_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                /* Can't be smaller than the number of outstanding CQEs */
                outst_cqe = mlx4_ib_get_outstanding_cqes(cq);
                if (entries < outst_cqe + 1) {
-                       err = 0;
+                       err = -EINVAL;
                        goto out;
                }
 
index eb8e215f1613ee95ae7fb6253ba6b06220f1b00c..ac6e2b710ea6fef928271869f0e170f2bbdb158d 100644 (file)
@@ -1269,8 +1269,7 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        struct mlx4_dev *dev = mdev->dev;
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        struct mlx4_ib_steering *ib_steering = NULL;
-       enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
-               MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+       enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
        struct mlx4_flow_reg_id reg_id;
 
        if (mdev->dev->caps.steering_mode ==
@@ -1284,8 +1283,10 @@ static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
                                    !!(mqp->flags &
                                       MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
                                    prot, &reg_id.id);
-       if (err)
+       if (err) {
+               pr_err("multicast attach op failed, err %d\n", err);
                goto err_malloc;
+       }
 
        reg_id.mirror = 0;
        if (mlx4_is_bonded(dev)) {
@@ -1348,9 +1349,7 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        struct net_device *ndev;
        struct mlx4_ib_gid_entry *ge;
        struct mlx4_flow_reg_id reg_id = {0, 0};
-
-       enum mlx4_protocol prot = (gid->raw[1] == 0x0e) ?
-               MLX4_PROT_IB_IPV4 : MLX4_PROT_IB_IPV6;
+       enum mlx4_protocol prot = MLX4_PROT_IB_IPV6;
 
        if (mdev->dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED) {
index dfc6ca128a7e355ef737976dceee50a7033e62b7..ed2bd6701f9b131c3dc3261cb2eae21a2d835524 100644 (file)
@@ -1696,8 +1696,10 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
                            qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI ||
                            qp->mlx4_ib_qp_type == MLX4_IB_QPT_TUN_GSI) {
                                err = handle_eth_ud_smac_index(dev, qp, (u8 *)attr->smac, context);
-                               if (err)
-                                       return -EINVAL;
+                               if (err) {
+                                       err = -EINVAL;
+                                       goto out;
+                               }
                                if (qp->mlx4_ib_qp_type == MLX4_IB_QPT_PROXY_GSI)
                                        dev->qp1_proxy[qp->port - 1] = qp;
                        }
index 03bf81211a5401c366522c68213ecf27cdf4b326..cc4ac1e583b29725af01e03e40bebaee758c6e01 100644 (file)
@@ -997,7 +997,7 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
        struct ib_device_attr *dprops = NULL;
        struct ib_port_attr *pprops = NULL;
        struct mlx5_general_caps *gen;
-       int err = 0;
+       int err = -ENOMEM;
        int port;
 
        gen = &dev->mdev->caps.gen;
@@ -1331,6 +1331,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)         |
                (1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)         |
                (1ull << IB_USER_VERBS_CMD_OPEN_QP);
+       dev->ib_dev.uverbs_ex_cmd_mask =
+               (1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);
 
        dev->ib_dev.query_device        = mlx5_ib_query_device;
        dev->ib_dev.query_port          = mlx5_ib_query_port;
index 32a28bd50b20ae08c41a9a7086b72045f7934c3d..cd9822eeacae3f1ab138731ea9c6a67963974bc2 100644 (file)
@@ -1012,6 +1012,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
                goto err_2;
        }
        mr->umem = umem;
+       mr->dev = dev;
        mr->live = 1;
        kvfree(in);
 
index b43456ae124bccb99cfe78446b1e1ce347c8f2a6..c9780d919769a6ef9a0020b7e710afa7e7ce2497 100644 (file)
@@ -40,7 +40,7 @@
 #include <be_roce.h>
 #include "ocrdma_sli.h"
 
-#define OCRDMA_ROCE_DRV_VERSION "10.2.287.0u"
+#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u"
 
 #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
 #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
 #define OCRDMA_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
 
 #define convert_to_64bit(lo, hi) ((u64)hi << 32 | (u64)lo)
+#define EQ_INTR_PER_SEC_THRSH_HI 150000
+#define EQ_INTR_PER_SEC_THRSH_LOW 100000
+#define EQ_AIC_MAX_EQD 20
+#define EQ_AIC_MIN_EQD 0
+
+void ocrdma_eqd_set_task(struct work_struct *work);
 
 struct ocrdma_dev_attr {
        u8 fw_ver[32];
        u32 vendor_id;
        u32 device_id;
        u16 max_pd;
+       u16 max_dpp_pds;
        u16 max_cq;
        u16 max_cqe;
        u16 max_qp;
@@ -116,12 +123,19 @@ struct ocrdma_queue_info {
        bool created;
 };
 
+struct ocrdma_aic_obj {         /* Adaptive interrupt coalescing (AIC) info */
+       u32 prev_eqd;
+       u64 eq_intr_cnt;
+       u64 prev_eq_intr_cnt;
+};
+
 struct ocrdma_eq {
        struct ocrdma_queue_info q;
        u32 vector;
        int cq_cnt;
        struct ocrdma_dev *dev;
        char irq_name[32];
+       struct ocrdma_aic_obj aic_obj;
 };
 
 struct ocrdma_mq {
@@ -171,6 +185,21 @@ struct ocrdma_stats {
        struct ocrdma_dev *dev;
 };
 
+struct ocrdma_pd_resource_mgr {
+       u32 pd_norm_start;
+       u16 pd_norm_count;
+       u16 pd_norm_thrsh;
+       u16 max_normal_pd;
+       u32 pd_dpp_start;
+       u16 pd_dpp_count;
+       u16 pd_dpp_thrsh;
+       u16 max_dpp_pd;
+       u16 dpp_page_index;
+       unsigned long *pd_norm_bitmap;
+       unsigned long *pd_dpp_bitmap;
+       bool pd_prealloc_valid;
+};
+
 struct stats_mem {
        struct ocrdma_mqe mqe;
        void *va;
@@ -198,6 +227,7 @@ struct ocrdma_dev {
 
        struct ocrdma_eq *eq_tbl;
        int eq_cnt;
+       struct delayed_work eqd_work;
        u16 base_eqid;
        u16 max_eq;
 
@@ -255,7 +285,12 @@ struct ocrdma_dev {
        struct ocrdma_stats rx_qp_err_stats;
        struct ocrdma_stats tx_dbg_stats;
        struct ocrdma_stats rx_dbg_stats;
+       struct ocrdma_stats driver_stats;
+       struct ocrdma_stats reset_stats;
        struct dentry *dir;
+       atomic_t async_err_stats[OCRDMA_MAX_ASYNC_ERRORS];
+       atomic_t cqe_err_stats[OCRDMA_MAX_CQE_ERR];
+       struct ocrdma_pd_resource_mgr *pd_mgr;
 };
 
 struct ocrdma_cq {
@@ -335,7 +370,6 @@ struct ocrdma_srq {
 
 struct ocrdma_qp {
        struct ib_qp ibqp;
-       struct ocrdma_dev *dev;
 
        u8 __iomem *sq_db;
        struct ocrdma_qp_hwq_info sq;
index f3cc8c9e65ae70f9e0b157632189e324970cbea5..d812904f398473d1502bb979d6d822c04b55f2b8 100644 (file)
 #include <net/netevent.h>
 
 #include <rdma/ib_addr.h>
+#include <rdma/ib_mad.h>
 
 #include "ocrdma.h"
 #include "ocrdma_verbs.h"
 #include "ocrdma_ah.h"
 #include "ocrdma_hw.h"
+#include "ocrdma_stats.h"
 
 #define OCRDMA_VID_PCP_SHIFT   0xD
 
 static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
-                       struct ib_ah_attr *attr, union ib_gid *sgid, int pdid)
+                       struct ib_ah_attr *attr, union ib_gid *sgid,
+                       int pdid, bool *isvlan)
 {
        int status = 0;
-       u16 vlan_tag; bool vlan_enabled = false;
+       u16 vlan_tag;
        struct ocrdma_eth_vlan eth;
        struct ocrdma_grh grh;
        int eth_sz;
@@ -59,7 +62,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
                vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
                eth.vlan_tag = cpu_to_be16(vlan_tag);
                eth_sz = sizeof(struct ocrdma_eth_vlan);
-               vlan_enabled = true;
+               *isvlan = true;
        } else {
                eth.eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
                eth_sz = sizeof(struct ocrdma_eth_basic);
@@ -82,7 +85,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
        /* Eth HDR */
        memcpy(&ah->av->eth_hdr, &eth, eth_sz);
        memcpy((u8 *)ah->av + eth_sz, &grh, sizeof(struct ocrdma_grh));
-       if (vlan_enabled)
+       if (*isvlan)
                ah->av->valid |= OCRDMA_AV_VLAN_VALID;
        ah->av->valid = cpu_to_le32(ah->av->valid);
        return status;
@@ -91,6 +94,7 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
 struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
 {
        u32 *ahid_addr;
+       bool isvlan = false;
        int status;
        struct ocrdma_ah *ah;
        struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
@@ -127,15 +131,20 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
                }
        }
 
-       status = set_av_attr(dev, ah, attr, &sgid, pd->id);
+       status = set_av_attr(dev, ah, attr, &sgid, pd->id, &isvlan);
        if (status)
                goto av_conf_err;
 
        /* if pd is for the user process, pass the ah_id to user space */
        if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
                ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
-               *ahid_addr = ah->id;
+               *ahid_addr = 0;
+               *ahid_addr |= ah->id & OCRDMA_AH_ID_MASK;
+               if (isvlan)
+                       *ahid_addr |= (OCRDMA_AH_VLAN_VALID_MASK <<
+                                      OCRDMA_AH_VLAN_VALID_SHIFT);
        }
+
        return &ah->ibah;
 
 av_conf_err:
@@ -191,5 +200,20 @@ int ocrdma_process_mad(struct ib_device *ibdev,
                       struct ib_grh *in_grh,
                       struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
-       return IB_MAD_RESULT_SUCCESS;
+       int status;
+       struct ocrdma_dev *dev;
+
+       switch (in_mad->mad_hdr.mgmt_class) {
+       case IB_MGMT_CLASS_PERF_MGMT:
+               dev = get_ocrdma_dev(ibdev);
+               if (!ocrdma_pma_counters(dev, out_mad))
+                       status = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
+               else
+                       status = IB_MAD_RESULT_SUCCESS;
+               break;
+       default:
+               status = IB_MAD_RESULT_SUCCESS;
+               break;
+       }
+       return status;
 }
index 8ac49e7f96d1585c7fb94049dd7db88a0699b7f4..726a87cf22dcb215d2a105f08a054395bb0c6804 100644 (file)
 #ifndef __OCRDMA_AH_H__
 #define __OCRDMA_AH_H__
 
+enum {
+       OCRDMA_AH_ID_MASK               = 0x3FF,
+       OCRDMA_AH_VLAN_VALID_MASK       = 0x01,
+       OCRDMA_AH_VLAN_VALID_SHIFT      = 0x1F
+};
+
 struct ib_ah *ocrdma_create_ah(struct ib_pd *, struct ib_ah_attr *);
 int ocrdma_destroy_ah(struct ib_ah *);
 int ocrdma_query_ah(struct ib_ah *, struct ib_ah_attr *);
index 638bff1ffc6c73b95a41a1556ff42a06680d6bca..0c9e95909a64651e931f4768e88f97e266c8379e 100644 (file)
@@ -734,6 +734,9 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
                break;
        }
 
+       if (type < OCRDMA_MAX_ASYNC_ERRORS)
+               atomic_inc(&dev->async_err_stats[type]);
+
        if (qp_event) {
                if (qp->ibqp.event_handler)
                        qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
@@ -831,20 +834,20 @@ static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
        return 0;
 }
 
-static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
-                                      struct ocrdma_cq *cq)
+static struct ocrdma_cq *_ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+                               struct ocrdma_cq *cq, bool sq)
 {
-       unsigned long flags;
        struct ocrdma_qp *qp;
-       bool buddy_cq_found = false;
-       /* Go through list of QPs in error state which are using this CQ
-        * and invoke its callback handler to trigger CQE processing for
-        * error/flushed CQE. It is rare to find more than few entries in
-        * this list as most consumers stops after getting error CQE.
-        * List is traversed only once when a matching buddy cq found for a QP.
-        */
-       spin_lock_irqsave(&dev->flush_q_lock, flags);
-       list_for_each_entry(qp, &cq->sq_head, sq_entry) {
+       struct list_head *cur;
+       struct ocrdma_cq *bcq = NULL;
+       struct list_head *head = sq ? (&cq->sq_head) : (&cq->rq_head);
+
+       list_for_each(cur, head) {
+               if (sq)
+                       qp = list_entry(cur, struct ocrdma_qp, sq_entry);
+               else
+                       qp = list_entry(cur, struct ocrdma_qp, rq_entry);
+
                if (qp->srq)
                        continue;
                /* if wq and rq share the same cq, than comp_handler
@@ -856,19 +859,41 @@ static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
                 * if completion came on rq, sq's cq is buddy cq.
                 */
                if (qp->sq_cq == cq)
-                       cq = qp->rq_cq;
+                       bcq = qp->rq_cq;
                else
-                       cq = qp->sq_cq;
-               buddy_cq_found = true;
-               break;
+                       bcq = qp->sq_cq;
+               return bcq;
        }
+       return NULL;
+}
+
+static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
+                                      struct ocrdma_cq *cq)
+{
+       unsigned long flags;
+       struct ocrdma_cq *bcq = NULL;
+
+       /* Go through the list of QPs in error state which are using this CQ
+        * and invoke their callback handlers to trigger CQE processing for
+        * error/flushed CQEs. It is rare to find more than a few entries in
+        * this list, as most consumers stop after getting an error CQE.
+        * The list is traversed only until a matching buddy cq is found for a QP.
+        */
+       spin_lock_irqsave(&dev->flush_q_lock, flags);
+       /* Check whether a buddy CQ is present:
+        * true  - search the SQ flush list
+        * false - search the RQ flush list
+        */
+       bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, true);
+       if (bcq == NULL)
+               bcq = _ocrdma_qp_buddy_cq_handler(dev, cq, false);
        spin_unlock_irqrestore(&dev->flush_q_lock, flags);
-       if (buddy_cq_found == false)
-               return;
-       if (cq->ibcq.comp_handler) {
-               spin_lock_irqsave(&cq->comp_handler_lock, flags);
-               (*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
-               spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
+
+       /* if there is a valid buddy cq, invoke its completion handler */
+       if (bcq && bcq->ibcq.comp_handler) {
+               spin_lock_irqsave(&bcq->comp_handler_lock, flags);
+               (*bcq->ibcq.comp_handler) (&bcq->ibcq, bcq->ibcq.cq_context);
+               spin_unlock_irqrestore(&bcq->comp_handler_lock, flags);
        }
 }
 
@@ -935,6 +960,7 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
 
        } while (budget);
 
+       eq->aic_obj.eq_intr_cnt++;
        ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
        return IRQ_HANDLED;
 }
@@ -1050,6 +1076,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
        attr->max_pd =
            (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
+       attr->max_dpp_pds =
+          (rsp->max_dpp_pds_credits & OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_MASK) >>
+           OCRDMA_MBX_QUERY_CFG_MAX_DPP_PDS_OFFSET;
        attr->max_qp =
            (rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
            OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
@@ -1396,6 +1425,122 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
        return status;
 }
 
+
+static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
+{
+       int status = -ENOMEM;
+       size_t pd_bitmap_size;
+       struct ocrdma_alloc_pd_range *cmd;
+       struct ocrdma_alloc_pd_range_rsp *rsp;
+
+       /* Pre-allocate the DPP PDs */
+       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
+       if (!cmd)
+               return -ENOMEM;
+       cmd->pd_count = dev->attr.max_dpp_pds;
+       cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
+       status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       if (status)
+               goto mbx_err;
+       rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+
+       if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) {
+               dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
+                               OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
+               dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
+                               OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+               dev->pd_mgr->max_dpp_pd = rsp->pd_count;
+               pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+               dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
+                                                    GFP_KERNEL);
+       }
+       kfree(cmd);
+
+       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
+       if (!cmd)
+               return -ENOMEM;
+
+       cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
+       status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       if (status)
+               goto mbx_err;
+       rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+       if (rsp->pd_count) {
+               dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
+                                       OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+               dev->pd_mgr->max_normal_pd = rsp->pd_count;
+               pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+               dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
+                                                     GFP_KERNEL);
+       }
+
+       if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
+               /* Enable PD resource manager */
+               dev->pd_mgr->pd_prealloc_valid = true;
+       } else {
+               return -ENOMEM;
+       }
+mbx_err:
+       kfree(cmd);
+       return status;
+}
+
+static void ocrdma_mbx_dealloc_pd_range(struct ocrdma_dev *dev)
+{
+       struct ocrdma_dealloc_pd_range *cmd;
+
+       /* return normal PDs to firmware */
+       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE, sizeof(*cmd));
+       if (!cmd)
+               goto mbx_err;
+
+       if (dev->pd_mgr->max_normal_pd) {
+               cmd->start_pd_id = dev->pd_mgr->pd_norm_start;
+               cmd->pd_count = dev->pd_mgr->max_normal_pd;
+               ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       }
+
+       if (dev->pd_mgr->max_dpp_pd) {
+               kfree(cmd);
+               /* return DPP PDs to firmware */
+               cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD_RANGE,
+                                         sizeof(*cmd));
+               if (!cmd)
+                       goto mbx_err;
+
+               cmd->start_pd_id = dev->pd_mgr->pd_dpp_start;
+               cmd->pd_count = dev->pd_mgr->max_dpp_pd;
+               ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       }
+mbx_err:
+       kfree(cmd);
+}
+
+void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev)
+{
+       int status;
+
+       dev->pd_mgr = kzalloc(sizeof(struct ocrdma_pd_resource_mgr),
+                             GFP_KERNEL);
+       if (!dev->pd_mgr) {
+               pr_err("%s(%d) Memory allocation failure.\n", __func__, dev->id);
+               return;
+       }
+       status = ocrdma_mbx_alloc_pd_range(dev);
+       if (status) {
+               pr_err("%s(%d) Unable to initialize PD pool, using default.\n",
+                        __func__, dev->id);
+       }
+}
+
+static void ocrdma_free_pd_pool(struct ocrdma_dev *dev)
+{
+       ocrdma_mbx_dealloc_pd_range(dev);
+       kfree(dev->pd_mgr->pd_norm_bitmap);
+       kfree(dev->pd_mgr->pd_dpp_bitmap);
+       kfree(dev->pd_mgr);
+}
+
 static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
                               int *num_pages, int *page_size)
 {
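
Background for the OCRDMA_CMD_ALLOC_PD_RANGE code above: the driver now pre-allocates two PD ranges from firmware (the DPP range first, then the normal range) and tracks each range with a bitmap sized via BITS_TO_LONGS(). A rough sketch of that bookkeeping, using hypothetical pd_pool names rather than the driver's ocrdma_pd_resource_mgr fields:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct pd_pool {
        unsigned long *bits;    /* one bit per pre-allocated PD id */
        u16 start;              /* first PD id returned by firmware */
        u16 count;              /* number of ids in the range */
};

static int pd_pool_init(struct pd_pool *p, u16 start, u16 count)
{
        /* same sizing as the kzalloc() calls in ocrdma_mbx_alloc_pd_range() */
        p->bits = kzalloc(BITS_TO_LONGS(count) * sizeof(long), GFP_KERNEL);
        if (!p->bits)
                return -ENOMEM;
        p->start = start;
        p->count = count;
        return 0;
}
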
@@ -1896,8 +2041,9 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
 {
        bool found;
        unsigned long flags;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
-       spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
+       spin_lock_irqsave(&dev->flush_q_lock, flags);
        found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
        if (!found)
                list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
@@ -1906,7 +2052,7 @@ void ocrdma_flush_qp(struct ocrdma_qp *qp)
                if (!found)
                        list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
        }
-       spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);
+       spin_unlock_irqrestore(&dev->flush_q_lock, flags);
 }
 
 static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)
@@ -1972,7 +2118,8 @@ static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
        int status;
        u32 len, hw_pages, hw_page_size;
        dma_addr_t pa;
-       struct ocrdma_dev *dev = qp->dev;
+       struct ocrdma_pd *pd = qp->pd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 max_wqe_allocated;
        u32 max_sges = attrs->cap.max_send_sge;
@@ -2027,7 +2174,8 @@ static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
        int status;
        u32 len, hw_pages, hw_page_size;
        dma_addr_t pa = 0;
-       struct ocrdma_dev *dev = qp->dev;
+       struct ocrdma_pd *pd = qp->pd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
        struct pci_dev *pdev = dev->nic_info.pdev;
        u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;
 
@@ -2086,7 +2234,8 @@ static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
 static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
                                        struct ocrdma_qp *qp)
 {
-       struct ocrdma_dev *dev = qp->dev;
+       struct ocrdma_pd *pd = qp->pd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
        struct pci_dev *pdev = dev->nic_info.pdev;
        dma_addr_t pa = 0;
        int ird_page_size = dev->attr.ird_page_size;
@@ -2157,8 +2306,8 @@ int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
 {
        int status = -ENOMEM;
        u32 flags = 0;
-       struct ocrdma_dev *dev = qp->dev;
        struct ocrdma_pd *pd = qp->pd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
        struct pci_dev *pdev = dev->nic_info.pdev;
        struct ocrdma_cq *cq;
        struct ocrdma_create_qp_req *cmd;
@@ -2281,11 +2430,12 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
        union ib_gid sgid, zgid;
        u32 vlan_id;
        u8 mac_addr[6];
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
        if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
                return -EINVAL;
-       if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0))
-               ocrdma_init_service_level(qp->dev);
+       if (atomic_cmpxchg(&dev->update_sl, 1, 0))
+               ocrdma_init_service_level(dev);
        cmd->params.tclass_sq_psn |=
            (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
        cmd->params.rnt_rc_sl_fl |=
@@ -2296,7 +2446,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
        cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
        memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
               sizeof(cmd->params.dgid));
-       status = ocrdma_query_gid(&qp->dev->ibdev, 1,
+       status = ocrdma_query_gid(&dev->ibdev, 1,
                        ah_attr->grh.sgid_index, &sgid);
        if (status)
                return status;
@@ -2307,7 +2457,9 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
 
        qp->sgid_idx = ah_attr->grh.sgid_index;
        memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
-       ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
+       status = ocrdma_resolve_dmac(dev, ah_attr, &mac_addr[0]);
+       if (status)
+               return status;
        cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
                                (mac_addr[2] << 16) | (mac_addr[3] << 24);
        /* convert them to LE format. */
@@ -2320,7 +2472,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
                    vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
                cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
                cmd->params.rnt_rc_sl_fl |=
-                       (qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
+                       (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
        }
        return 0;
 }
@@ -2330,6 +2482,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                                struct ib_qp_attr *attrs, int attr_mask)
 {
        int status = 0;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
        if (attr_mask & IB_QP_PKEY_INDEX) {
                cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
@@ -2347,12 +2500,12 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                        return status;
        } else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
                /* set the default mac address for UD, GSI QPs */
-               cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
-                       (qp->dev->nic_info.mac_addr[1] << 8) |
-                       (qp->dev->nic_info.mac_addr[2] << 16) |
-                       (qp->dev->nic_info.mac_addr[3] << 24);
-               cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
-                                       (qp->dev->nic_info.mac_addr[5] << 8);
+               cmd->params.dmac_b0_to_b3 = dev->nic_info.mac_addr[0] |
+                       (dev->nic_info.mac_addr[1] << 8) |
+                       (dev->nic_info.mac_addr[2] << 16) |
+                       (dev->nic_info.mac_addr[3] << 24);
+               cmd->params.vlan_dmac_b4_to_b5 = dev->nic_info.mac_addr[4] |
+                                       (dev->nic_info.mac_addr[5] << 8);
        }
        if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
            attrs->en_sqd_async_notify) {
@@ -2409,7 +2562,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
        }
        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
-               if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
+               if (attrs->max_rd_atomic > dev->attr.max_ord_per_qp) {
                        status = -EINVAL;
                        goto pmtu_err;
                }
@@ -2417,7 +2570,7 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
        }
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
-               if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
+               if (attrs->max_dest_rd_atomic > dev->attr.max_ird_per_qp) {
                        status = -EINVAL;
                        goto pmtu_err;
                }
@@ -2870,6 +3023,82 @@ done:
        return status;
 }
 
+static int ocrdma_mbx_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+                                int num)
+{
+       int i, status = -ENOMEM;
+       struct ocrdma_modify_eqd_req *cmd;
+
+       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_EQ_DELAY, sizeof(*cmd));
+       if (!cmd)
+               return status;
+
+       ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_MODIFY_EQ_DELAY,
+                       OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
+
+       cmd->cmd.num_eq = num;
+       for (i = 0; i < num; i++) {
+               cmd->cmd.set_eqd[i].eq_id = eq[i].q.id;
+               cmd->cmd.set_eqd[i].phase = 0;
+               cmd->cmd.set_eqd[i].delay_multiplier =
+                               (eq[i].aic_obj.prev_eqd * 65)/100;
+       }
+       status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+       if (status)
+               goto mbx_err;
+mbx_err:
+       kfree(cmd);
+       return status;
+}
+
+static int ocrdma_modify_eqd(struct ocrdma_dev *dev, struct ocrdma_eq *eq,
+                            int num)
+{
+       int num_eqs, i = 0;
+       if (num > 8) {
+               while (num) {
+                       num_eqs = min(num, 8);
+                       ocrdma_mbx_modify_eqd(dev, &eq[i], num_eqs);
+                       i += num_eqs;
+                       num -= num_eqs;
+               }
+       } else {
+               ocrdma_mbx_modify_eqd(dev, eq, num);
+       }
+       return 0;
+}
+
+void ocrdma_eqd_set_task(struct work_struct *work)
+{
+       struct ocrdma_dev *dev =
+               container_of(work, struct ocrdma_dev, eqd_work.work);
+       struct ocrdma_eq *eq = NULL;
+       int i, num = 0, status = -EINVAL;
+       u64 eq_intr;
+
+       for (i = 0; i < dev->eq_cnt; i++) {
+               eq = &dev->eq_tbl[i];
+               if (eq->aic_obj.eq_intr_cnt > eq->aic_obj.prev_eq_intr_cnt) {
+                       eq_intr = eq->aic_obj.eq_intr_cnt -
+                                 eq->aic_obj.prev_eq_intr_cnt;
+                       if ((eq_intr > EQ_INTR_PER_SEC_THRSH_HI) &&
+                           (eq->aic_obj.prev_eqd == EQ_AIC_MIN_EQD)) {
+                               eq->aic_obj.prev_eqd = EQ_AIC_MAX_EQD;
+                               num++;
+                       } else if ((eq_intr < EQ_INTR_PER_SEC_THRSH_LOW) &&
+                                  (eq->aic_obj.prev_eqd == EQ_AIC_MAX_EQD)) {
+                               eq->aic_obj.prev_eqd = EQ_AIC_MIN_EQD;
+                               num++;
+                       }
+               }
+               eq->aic_obj.prev_eq_intr_cnt = eq->aic_obj.eq_intr_cnt;
+       }
+
+       if (num)
+               status = ocrdma_modify_eqd(dev, &dev->eq_tbl[0], num);
+       schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
+}
+
 int ocrdma_init_hw(struct ocrdma_dev *dev)
 {
        int status;
@@ -2915,6 +3144,7 @@ qpeq_err:
 
 void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
 {
+       ocrdma_free_pd_pool(dev);
        ocrdma_mbx_delete_ah_tbl(dev);
 
        /* cleanup the eqs */
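
On the adaptive interrupt coalescing added to this file: ocrdma_eqd_set_task() samples each EQ's interrupt count once a second and, when the per-second rate crosses the HI or LOW threshold, flips the EQ delay between its maximum and minimum values; ocrdma_mbx_modify_eqd() then programs firmware in batches of at most 8 EQs per mailbox command, scaling the delay by 65/100. A compact restatement of that policy as a sketch, assuming the EQ_* constants come from the driver headers; this is not the driver code:

#include <linux/types.h>

static u32 pick_eqd(u64 intr_per_sec, u32 prev_eqd)
{
        if (intr_per_sec > EQ_INTR_PER_SEC_THRSH_HI &&
            prev_eqd == EQ_AIC_MIN_EQD)
                return EQ_AIC_MAX_EQD;          /* busy EQ: max coalescing delay */
        if (intr_per_sec < EQ_INTR_PER_SEC_THRSH_LOW &&
            prev_eqd == EQ_AIC_MAX_EQD)
                return EQ_AIC_MIN_EQD;          /* quiet EQ: back to min delay */
        return prev_eqd;                        /* otherwise leave it alone */
}

static u32 eqd_to_fw_multiplier(u32 eqd)
{
        return (eqd * 65) / 100;        /* scaling used by ocrdma_mbx_modify_eqd() */
}
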
index 6eed8f191322a134fc0dcd1438cf771525c06a18..e905972fceb7d48ff882800390c1330367815caf 100644 (file)
@@ -136,5 +136,7 @@ int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq);
 int ocrdma_mbx_rdma_stats(struct ocrdma_dev *, bool reset);
 char *port_speed_string(struct ocrdma_dev *dev);
 void ocrdma_init_service_level(struct ocrdma_dev *);
+void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev);
+void ocrdma_free_pd_range(struct ocrdma_dev *dev);
 
 #endif                         /* __OCRDMA_HW_H__ */
index b0b2257b8e0430738cc7b5f5f36145868c7d4dc3..7a2b59aca004bfac1eae4fc258fcb08d077bf449 100644 (file)
@@ -239,7 +239,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
 
        dev->ibdev.node_type = RDMA_NODE_IB_CA;
        dev->ibdev.phys_port_cnt = 1;
-       dev->ibdev.num_comp_vectors = 1;
+       dev->ibdev.num_comp_vectors = dev->eq_cnt;
 
        /* mandatory verbs. */
        dev->ibdev.query_device = ocrdma_query_device;
@@ -329,6 +329,8 @@ static int ocrdma_alloc_resources(struct ocrdma_dev *dev)
        if (dev->stag_arr == NULL)
                goto alloc_err;
 
+       ocrdma_alloc_pd_pool(dev);
+
        spin_lock_init(&dev->av_tbl.lock);
        spin_lock_init(&dev->flush_q_lock);
        return 0;
@@ -491,6 +493,9 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
        spin_unlock(&ocrdma_devlist_lock);
        /* Init stats */
        ocrdma_add_port_stats(dev);
+       /* Interrupt Moderation */
+       INIT_DELAYED_WORK(&dev->eqd_work, ocrdma_eqd_set_task);
+       schedule_delayed_work(&dev->eqd_work, msecs_to_jiffies(1000));
 
        pr_info("%s %s: %s \"%s\" port %d\n",
                dev_name(&dev->nic_info.pdev->dev), hca_name(dev),
@@ -528,11 +533,12 @@ static void ocrdma_remove(struct ocrdma_dev *dev)
        /* first unregister with stack to stop all the active traffic
         * of the registered clients.
         */
-       ocrdma_rem_port_stats(dev);
+       cancel_delayed_work_sync(&dev->eqd_work);
        ocrdma_remove_sysfiles(dev);
-
        ib_unregister_device(&dev->ibdev);
 
+       ocrdma_rem_port_stats(dev);
+
        spin_lock(&ocrdma_devlist_lock);
        list_del_rcu(&dev->entry);
        spin_unlock(&ocrdma_devlist_lock);
index 4e036480c1a8fa7d9f8d1be9e2267ea3147b2b5a..243c87c8bd65d09026f46ee12e3ee3b9109ce155 100644 (file)
@@ -75,6 +75,8 @@ enum {
        OCRDMA_CMD_DESTROY_RBQ = 26,
 
        OCRDMA_CMD_GET_RDMA_STATS = 27,
+       OCRDMA_CMD_ALLOC_PD_RANGE = 28,
+       OCRDMA_CMD_DEALLOC_PD_RANGE = 29,
 
        OCRDMA_CMD_MAX
 };
@@ -87,6 +89,7 @@ enum {
        OCRDMA_CMD_CREATE_MQ            = 21,
        OCRDMA_CMD_GET_CTRL_ATTRIBUTES  = 32,
        OCRDMA_CMD_GET_FW_VER           = 35,
+       OCRDMA_CMD_MODIFY_EQ_DELAY      = 41,
        OCRDMA_CMD_DELETE_MQ            = 53,
        OCRDMA_CMD_DELETE_CQ            = 54,
        OCRDMA_CMD_DELETE_EQ            = 55,
@@ -101,7 +104,7 @@ enum {
        QTYPE_MCCQ      = 3
 };
 
-#define OCRDMA_MAX_SGID                8
+#define OCRDMA_MAX_SGID                16
 
 #define OCRDMA_MAX_QP    2048
 #define OCRDMA_MAX_CQ    2048
@@ -314,6 +317,29 @@ struct ocrdma_create_eq_rsp {
 
 #define OCRDMA_EQ_MINOR_OTHER  0x1
 
+struct ocrmda_set_eqd {
+       u32 eq_id;
+       u32 phase;
+       u32 delay_multiplier;
+};
+
+struct ocrdma_modify_eqd_cmd {
+       struct ocrdma_mbx_hdr req;
+       u32 num_eq;
+       struct ocrmda_set_eqd set_eqd[8];
+} __packed;
+
+struct ocrdma_modify_eqd_req {
+       struct ocrdma_mqe_hdr hdr;
+       struct ocrdma_modify_eqd_cmd cmd;
+};
+
+struct ocrdma_modify_eq_delay_rsp {
+       struct ocrdma_mbx_rsp hdr;
+       u32 rsvd0;
+} __packed;
+
 enum {
        OCRDMA_MCQE_STATUS_SHIFT        = 0,
        OCRDMA_MCQE_STATUS_MASK         = 0xFFFF,
@@ -441,7 +467,9 @@ enum OCRDMA_ASYNC_EVENT_TYPE {
        OCRDMA_DEVICE_FATAL_EVENT       = 0x08,
        OCRDMA_SRQCAT_ERROR             = 0x0E,
        OCRDMA_SRQ_LIMIT_EVENT          = 0x0F,
-       OCRDMA_QP_LAST_WQE_EVENT        = 0x10
+       OCRDMA_QP_LAST_WQE_EVENT        = 0x10,
+
+       OCRDMA_MAX_ASYNC_ERRORS
 };
 
 /* mailbox command request and responses */
@@ -1297,6 +1325,37 @@ struct ocrdma_dealloc_pd_rsp {
        struct ocrdma_mbx_rsp rsp;
 };
 
+struct ocrdma_alloc_pd_range {
+       struct ocrdma_mqe_hdr hdr;
+       struct ocrdma_mbx_hdr req;
+       u32 enable_dpp_rsvd;
+       u32 pd_count;
+};
+
+struct ocrdma_alloc_pd_range_rsp {
+       struct ocrdma_mqe_hdr hdr;
+       struct ocrdma_mbx_rsp rsp;
+       u32 dpp_page_pdid;
+       u32 pd_count;
+};
+
+enum {
+       OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK = 0xFFFF,
+};
+
+struct ocrdma_dealloc_pd_range {
+       struct ocrdma_mqe_hdr hdr;
+       struct ocrdma_mbx_hdr req;
+       u32 start_pd_id;
+       u32 pd_count;
+};
+
+struct ocrdma_dealloc_pd_range_rsp {
+       struct ocrdma_mqe_hdr hdr;
+       struct ocrdma_mbx_hdr req;
+       u32 rsvd;
+};
+
 enum {
        OCRDMA_ADDR_CHECK_ENABLE        = 1,
        OCRDMA_ADDR_CHECK_DISABLE       = 0
@@ -1597,7 +1656,9 @@ enum OCRDMA_CQE_STATUS {
        OCRDMA_CQE_INV_EEC_STATE_ERR,
        OCRDMA_CQE_FATAL_ERR,
        OCRDMA_CQE_RESP_TIMEOUT_ERR,
-       OCRDMA_CQE_GENERAL_ERR
+       OCRDMA_CQE_GENERAL_ERR,
+
+       OCRDMA_MAX_CQE_ERR
 };
 
 enum {
@@ -1673,6 +1734,7 @@ enum {
        OCRDMA_FLAG_FENCE_R     = 0x8,
        OCRDMA_FLAG_SOLICIT     = 0x10,
        OCRDMA_FLAG_IMM         = 0x20,
+       OCRDMA_FLAG_AH_VLAN_PR  = 0x40,
 
        /* Stag flags */
        OCRDMA_LKEY_FLAG_LOCAL_WR       = 0x1,
index 41a9aec9998d103f81dc325129cdab47bd20e2ff..48d7ef51aa0c209678e0ed4bbe97bc5ff9a881d5 100644 (file)
@@ -26,6 +26,7 @@
  *******************************************************************/
 
 #include <rdma/ib_addr.h>
+#include <rdma/ib_pma.h>
 #include "ocrdma_stats.h"
 
 static struct dentry *ocrdma_dbgfs_dir;
@@ -249,6 +250,27 @@ static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
        return stats;
 }
 
+static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev)
+{
+       struct ocrdma_rdma_stats_resp *rdma_stats =
+               (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+       struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
+
+       return convert_to_64bit(rx_stats->roce_frames_lo,
+               rx_stats->roce_frames_hi) + (u64)rx_stats->roce_frame_icrc_drops
+               + (u64)rx_stats->roce_frame_payload_len_drops;
+}
+
+static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev)
+{
+       struct ocrdma_rdma_stats_resp *rdma_stats =
+               (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+       struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;
+
+       return (convert_to_64bit(rx_stats->roce_frame_bytes_lo,
+               rx_stats->roce_frame_bytes_hi))/4;
+}
+
 static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
 {
        char *stats = dev->stats_mem.debugfs_mem, *pcur;
@@ -292,6 +314,37 @@ static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
        return stats;
 }
 
+static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev)
+{
+       struct ocrdma_rdma_stats_resp *rdma_stats =
+               (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+       struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
+
+       return (convert_to_64bit(tx_stats->send_pkts_lo,
+                                tx_stats->send_pkts_hi) +
+       convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) +
+       convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) +
+       convert_to_64bit(tx_stats->read_rsp_pkts_lo,
+                        tx_stats->read_rsp_pkts_hi) +
+       convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi));
+}
+
+static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev)
+{
+       struct ocrdma_rdma_stats_resp *rdma_stats =
+               (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+       struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;
+
+       return (convert_to_64bit(tx_stats->send_bytes_lo,
+                                tx_stats->send_bytes_hi) +
+               convert_to_64bit(tx_stats->write_bytes_lo,
+                                tx_stats->write_bytes_hi) +
+               convert_to_64bit(tx_stats->read_req_bytes_lo,
+                                tx_stats->read_req_bytes_hi) +
+               convert_to_64bit(tx_stats->read_rsp_bytes_lo,
+                                tx_stats->read_rsp_bytes_hi))/4;
+}
+
 static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
 {
        char *stats = dev->stats_mem.debugfs_mem, *pcur;
@@ -432,10 +485,118 @@ static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
        return dev->stats_mem.debugfs_mem;
 }
 
+static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
+{
+       char *stats = dev->stats_mem.debugfs_mem, *pcur;
+
+       memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));
+
+       pcur = stats;
+       pcur += ocrdma_add_stat(stats, pcur, "async_cq_err",
+                               (u64)(dev->async_err_stats
+                               [OCRDMA_CQ_ERROR].counter));
+       pcur += ocrdma_add_stat(stats, pcur, "async_cq_overrun_err",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_CQ_OVERRUN_ERROR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_CQ_QPCAT_ERROR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_QP_ACCESS_ERROR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_qp_commm_est_evt",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_QP_COMM_EST_EVENT].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_SQ_DRAINED_EVENT].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_DEVICE_FATAL_EVENT].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_SRQCAT_ERROR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_SRQ_LIMIT_EVENT].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt",
+                               (u64)dev->async_err_stats
+                               [OCRDMA_QP_LAST_WQE_EVENT].counter);
+
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_LEN_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_QP_OP_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_EEC_OP_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_PROT_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_WR_FLUSH_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_MW_BIND_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_BAD_RESP_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_ACCESS_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_REM_INV_REQ_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_REM_ACCESS_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_REM_OP_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_RETRY_EXC_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_REM_ABORT_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_INV_EECN_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_INV_EEC_STATE_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_FATAL_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_RESP_TIMEOUT_ERR].counter);
+       pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err",
+                               (u64)dev->cqe_err_stats
+                               [OCRDMA_CQE_GENERAL_ERR].counter);
+       return stats;
+}
+
 static void ocrdma_update_stats(struct ocrdma_dev *dev)
 {
        ulong now = jiffies, secs;
        int status = 0;
+       struct ocrdma_rdma_stats_resp *rdma_stats =
+                     (struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
+       struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;
 
        secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
        if (secs) {
@@ -444,10 +605,74 @@ static void ocrdma_update_stats(struct ocrdma_dev *dev)
                if (status)
                        pr_err("%s: stats mbox failed with status = %d\n",
                               __func__, status);
+               /* Update PD counters from PD resource manager */
+               if (dev->pd_mgr->pd_prealloc_valid) {
+                       rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count;
+                       rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count;
+                       /* Threshold stats */
+                       rsrc_stats = &rdma_stats->th_rsrc_stats;
+                       rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh;
+                       rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh;
+               }
                dev->last_stats_time = jiffies;
        }
 }
 
+static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
+                                       const char __user *buffer,
+                                       size_t count, loff_t *ppos)
+{
+       char tmp_str[32];
+       long reset;
+       int status = 0;
+       struct ocrdma_stats *pstats = filp->private_data;
+       struct ocrdma_dev *dev = pstats->dev;
+
+       if (count > 32)
+               goto err;
+
+       if (copy_from_user(tmp_str, buffer, count))
+               goto err;
+
+       tmp_str[count-1] = '\0';
+       if (kstrtol(tmp_str, 10, &reset))
+               goto err;
+
+       switch (pstats->type) {
+       case OCRDMA_RESET_STATS:
+               if (reset) {
+                       status = ocrdma_mbx_rdma_stats(dev, true);
+                       if (status) {
+                               pr_err("Failed to reset stats = %d", status);
+                               goto err;
+                       }
+               }
+               break;
+       default:
+               goto err;
+       }
+
+       return count;
+err:
+       return -EFAULT;
+}
+
+int ocrdma_pma_counters(struct ocrdma_dev *dev,
+                       struct ib_mad *out_mad)
+{
+       struct ib_pma_portcounters *pma_cnt;
+
+       memset(out_mad->data, 0, sizeof(out_mad->data));
+       pma_cnt = (void *)(out_mad->data + 40);
+       ocrdma_update_stats(dev);
+
+       pma_cnt->port_xmit_data    = cpu_to_be32(ocrdma_sysfs_xmit_data(dev));
+       pma_cnt->port_rcv_data     = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
+       pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
+       pma_cnt->port_rcv_packets  = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));
+       return 0;
+}
+
 static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
                                        size_t usr_buf_len, loff_t *ppos)
 {
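
A note on the /4 in ocrdma_sysfs_rcv_data() and ocrdma_sysfs_xmit_data() above: the PMA PortCounters data fields are reported in 32-bit-word units rather than bytes, so the byte totals taken from the firmware stats are divided by 4 before ocrdma_pma_counters() stores them with cpu_to_be32(). A trivial sketch of that conversion, with a hypothetical helper name:

#include <linux/types.h>

static u32 bytes_to_pma_words(u64 bytes)
{
        return (u32)(bytes / 4);        /* PortXmitData/PortRcvData count dwords */
}
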
@@ -492,6 +717,9 @@ static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
        case OCRDMA_RX_DBG_STATS:
                data = ocrdma_rx_dbg_stats(dev);
                break;
+       case OCRDMA_DRV_STATS:
+               data = ocrdma_driver_dbg_stats(dev);
+               break;
 
        default:
                status = -EFAULT;
@@ -514,6 +742,7 @@ static const struct file_operations ocrdma_dbg_ops = {
        .owner = THIS_MODULE,
        .open = simple_open,
        .read = ocrdma_dbgfs_ops_read,
+       .write = ocrdma_dbgfs_ops_write,
 };
 
 void ocrdma_add_port_stats(struct ocrdma_dev *dev)
@@ -582,6 +811,18 @@ void ocrdma_add_port_stats(struct ocrdma_dev *dev)
                                 &dev->rx_dbg_stats, &ocrdma_dbg_ops))
                goto err;
 
+       dev->driver_stats.type = OCRDMA_DRV_STATS;
+       dev->driver_stats.dev = dev;
+       if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
+                                       &dev->driver_stats, &ocrdma_dbg_ops))
+               goto err;
+
+       dev->reset_stats.type = OCRDMA_RESET_STATS;
+       dev->reset_stats.dev = dev;
+       if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
+                               &dev->reset_stats, &ocrdma_dbg_ops))
+               goto err;
+
        /* Now create dma_mem for stats mbx command */
        if (!ocrdma_alloc_stats_mem(dev))
                goto err;
index 5f5e20c46d7ccdc9fc02aa76084b971ad4bad4c7..091edd68a8a34678e5283b2374c58274c84580b3 100644 (file)
@@ -43,12 +43,16 @@ enum OCRDMA_STATS_TYPE {
        OCRDMA_RXQP_ERRSTATS,
        OCRDMA_TXQP_ERRSTATS,
        OCRDMA_TX_DBG_STATS,
-       OCRDMA_RX_DBG_STATS
+       OCRDMA_RX_DBG_STATS,
+       OCRDMA_DRV_STATS,
+       OCRDMA_RESET_STATS
 };
 
 void ocrdma_rem_debugfs(void);
 void ocrdma_init_debugfs(void);
 void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
 void ocrdma_add_port_stats(struct ocrdma_dev *dev);
+int ocrdma_pma_counters(struct ocrdma_dev *dev,
+                       struct ib_mad *out_mad);
 
 #endif /* __OCRDMA_STATS_H__ */
index fb8d8c4dfbb97d2b36abdf69888793741aba182a..877175563634df79a889ed8a428405258b9df1e4 100644 (file)
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
 
        dev = get_ocrdma_dev(ibdev);
        memset(sgid, 0, sizeof(*sgid));
-       if (index > OCRDMA_MAX_SGID)
+       if (index >= OCRDMA_MAX_SGID)
                return -EINVAL;
 
        memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -253,6 +253,107 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
        return found;
 }
 
+
+static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
+{
+       u16 pd_bitmap_idx = 0;
+       const unsigned long *pd_bitmap;
+
+       if (dpp_pool) {
+               pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
+               pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
+                                                   dev->pd_mgr->max_dpp_pd);
+               __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
+               dev->pd_mgr->pd_dpp_count++;
+               if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
+                       dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
+       } else {
+               pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
+               pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
+                                                   dev->pd_mgr->max_normal_pd);
+               __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
+               dev->pd_mgr->pd_norm_count++;
+               if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
+                       dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
+       }
+       return pd_bitmap_idx;
+}
+
+static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
+                                       bool dpp_pool)
+{
+       u16 pd_count;
+       u16 pd_bit_index;
+
+       pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
+                             dev->pd_mgr->pd_norm_count;
+       if (pd_count == 0)
+               return -EINVAL;
+
+       if (dpp_pool) {
+               pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
+               if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
+                       return -EINVAL;
+               } else {
+                       __clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
+                       dev->pd_mgr->pd_dpp_count--;
+               }
+       } else {
+               pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
+               if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
+                       return -EINVAL;
+               } else {
+                       __clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
+                       dev->pd_mgr->pd_norm_count--;
+               }
+       }
+
+       return 0;
+}
+
+static u8 ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
+                                  bool dpp_pool)
+{
+       int status;
+
+       mutex_lock(&dev->dev_lock);
+       status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
+       mutex_unlock(&dev->dev_lock);
+       return status;
+}
+
+static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
+{
+       u16 pd_idx = 0;
+       int status = 0;
+
+       mutex_lock(&dev->dev_lock);
+       if (pd->dpp_enabled) {
+               /* try allocating DPP PD, if not available then normal PD */
+               if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
+                       pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
+                       pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
+                       pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
+               } else if (dev->pd_mgr->pd_norm_count <
+                          dev->pd_mgr->max_normal_pd) {
+                       pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
+                       pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
+                       pd->dpp_enabled = false;
+               } else {
+                       status = -EINVAL;
+               }
+       } else {
+               if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
+                       pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
+                       pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
+               } else {
+                       status = -EINVAL;
+               }
+       }
+       mutex_unlock(&dev->dev_lock);
+       return status;
+}
+
 static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
                                          struct ocrdma_ucontext *uctx,
                                          struct ib_udata *udata)
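
Continuing the hypothetical pd_pool sketch from the ocrdma_hw.c hunks earlier: _ocrdma_pd_mgr_get_bitmap() and _ocrdma_pd_mgr_put_bitmap() reduce to find-first-zero-bit allocation plus translation between a bit index and a PD id via the range start. A hedged sketch reusing the pd_pool struct from that earlier sketch; note the driver performs its full/empty checks in ocrdma_get_pd_num() and the put path instead:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/types.h>

static int pd_pool_get(struct pd_pool *p, u16 *pd_id)
{
        unsigned long bit = find_first_zero_bit(p->bits, p->count);

        if (bit >= p->count)
                return -ENOSPC;                 /* range exhausted */
        __set_bit(bit, p->bits);
        *pd_id = p->start + bit;
        return 0;
}

static int pd_pool_put(struct pd_pool *p, u16 pd_id)
{
        u16 bit = pd_id - p->start;

        if (bit >= p->count)
                return -EINVAL;                 /* id not from this range */
        __clear_bit(bit, p->bits);
        return 0;
}
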
@@ -272,6 +373,11 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
                                           dev->attr.wqe_size) : 0;
        }
 
+       if (dev->pd_mgr->pd_prealloc_valid) {
+               status = ocrdma_get_pd_num(dev, pd);
+               return (status == 0) ? pd : ERR_PTR(status);
+       }
+
 retry:
        status = ocrdma_mbx_alloc_pd(dev, pd);
        if (status) {
@@ -299,7 +405,11 @@ static int _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
 {
        int status = 0;
 
-       status = ocrdma_mbx_dealloc_pd(dev, pd);
+       if (dev->pd_mgr->pd_prealloc_valid)
+               status = ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
+       else
+               status = ocrdma_mbx_dealloc_pd(dev, pd);
+
        kfree(pd);
        return status;
 }
@@ -325,7 +435,6 @@ err:
 
 static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
 {
-       int status = 0;
        struct ocrdma_pd *pd = uctx->cntxt_pd;
        struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 
@@ -334,8 +443,8 @@ static int ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
                       __func__, dev->id, pd->id);
        }
        uctx->cntxt_pd = NULL;
-       status = _ocrdma_dealloc_pd(dev, pd);
-       return status;
+       (void)_ocrdma_dealloc_pd(dev, pd);
+       return 0;
 }
 
 static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
@@ -569,7 +678,7 @@ err:
        if (is_uctx_pd) {
                ocrdma_release_ucontext_pd(uctx);
        } else {
-               status = ocrdma_mbx_dealloc_pd(dev, pd);
+               status = _ocrdma_dealloc_pd(dev, pd);
                kfree(pd);
        }
 exit:
@@ -837,9 +946,8 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
 {
        struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
        struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);
-       int status;
 
-       status = ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
+       (void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);
 
        ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
 
@@ -850,11 +958,10 @@ int ocrdma_dereg_mr(struct ib_mr *ib_mr)
 
        /* Don't stop cleanup, in case FW is unresponsive */
        if (dev->mqe_ctx.fw_error_state) {
-               status = 0;
                pr_err("%s(%d) fw not responding.\n",
                       __func__, dev->id);
        }
-       return status;
+       return 0;
 }
 
 static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
@@ -986,7 +1093,6 @@ static void ocrdma_flush_cq(struct ocrdma_cq *cq)
 
 int ocrdma_destroy_cq(struct ib_cq *ibcq)
 {
-       int status;
        struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
        struct ocrdma_eq *eq = NULL;
        struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
@@ -1003,7 +1109,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
        synchronize_irq(irq);
        ocrdma_flush_cq(cq);
 
-       status = ocrdma_mbx_destroy_cq(dev, cq);
+       (void)ocrdma_mbx_destroy_cq(dev, cq);
        if (cq->ucontext) {
                pdid = cq->ucontext->cntxt_pd->id;
                ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
@@ -1014,7 +1120,7 @@ int ocrdma_destroy_cq(struct ib_cq *ibcq)
        }
 
        kfree(cq);
-       return status;
+       return 0;
 }
 
 static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
@@ -1113,8 +1219,8 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
        int status = 0;
        u64 usr_db;
        struct ocrdma_create_qp_uresp uresp;
-       struct ocrdma_dev *dev = qp->dev;
        struct ocrdma_pd *pd = qp->pd;
+       struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);
 
        memset(&uresp, 0, sizeof(uresp));
        usr_db = dev->nic_info.unmapped_db +
@@ -1253,7 +1359,6 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
                status = -ENOMEM;
                goto gen_err;
        }
-       qp->dev = dev;
        ocrdma_set_qp_init_params(qp, pd, attrs);
        if (udata == NULL)
                qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
@@ -1312,7 +1417,7 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        enum ib_qp_state old_qps;
 
        qp = get_ocrdma_qp(ibqp);
-       dev = qp->dev;
+       dev = get_ocrdma_dev(ibqp->device);
        if (attr_mask & IB_QP_STATE)
                status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
        /* if new and previous states are same hw doesn't need to
@@ -1335,7 +1440,7 @@ int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        enum ib_qp_state old_qps, new_qps;
 
        qp = get_ocrdma_qp(ibqp);
-       dev = qp->dev;
+       dev = get_ocrdma_dev(ibqp->device);
 
        /* syncronize with multiple context trying to change, retrive qps */
        mutex_lock(&dev->dev_lock);
@@ -1402,7 +1507,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
        u32 qp_state;
        struct ocrdma_qp_params params;
        struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
-       struct ocrdma_dev *dev = qp->dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
 
        memset(&params, 0, sizeof(params));
        mutex_lock(&dev->dev_lock);
@@ -1412,8 +1517,6 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
                goto mbx_err;
        if (qp->qp_type == IB_QPT_UD)
                qp_attr->qkey = params.qkey;
-       qp_attr->qp_state = get_ibqp_state(IB_QPS_INIT);
-       qp_attr->cur_qp_state = get_ibqp_state(IB_QPS_INIT);
        qp_attr->path_mtu =
                ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
                                OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
@@ -1468,6 +1571,8 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
        memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
        qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
                    OCRDMA_QP_PARAMS_STATE_SHIFT;
+       qp_attr->qp_state = get_ibqp_state(qp_state);
+       qp_attr->cur_qp_state = qp_attr->qp_state;
        qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
        qp_attr->max_dest_rd_atomic =
            params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
@@ -1475,19 +1580,18 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
            params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
        qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
                                OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
+       /* Sync driver QP state with FW */
+       ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
 mbx_err:
        return status;
 }
 
-static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, int idx)
+static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
 {
-       int i = idx / 32;
-       unsigned int mask = (1 << (idx % 32));
+       unsigned int i = idx / 32;
+       u32 mask = (1U << (idx % 32));
 
-       if (srq->idx_bit_fields[i] & mask)
-               srq->idx_bit_fields[i] &= ~mask;
-       else
-               srq->idx_bit_fields[i] |= mask;
+       srq->idx_bit_fields[i] ^= mask;
 }
 
 static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
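
The ocrdma_srq_toggle_bit() change above replaces the test-and-branch with a single XOR on the 32-bit word that holds the index. The same idea in isolation, with hypothetical names:

#include <linux/types.h>

static void toggle_bit(u32 *words, unsigned int idx)
{
        words[idx / 32] ^= 1U << (idx % 32);    /* flip exactly one bit */
}
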
@@ -1596,7 +1700,7 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
 {
        int found = false;
        unsigned long flags;
-       struct ocrdma_dev *dev = qp->dev;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
        /* sync with any active CQ poll */
 
        spin_lock_irqsave(&dev->flush_q_lock, flags);
@@ -1613,7 +1717,6 @@ void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
 
 int ocrdma_destroy_qp(struct ib_qp *ibqp)
 {
-       int status;
        struct ocrdma_pd *pd;
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev;
@@ -1622,7 +1725,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
        unsigned long flags;
 
        qp = get_ocrdma_qp(ibqp);
-       dev = qp->dev;
+       dev = get_ocrdma_dev(ibqp->device);
 
        attrs.qp_state = IB_QPS_ERR;
        pd = qp->pd;
@@ -1635,7 +1738,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
         * discarded until the old CQEs are discarded.
         */
        mutex_lock(&dev->dev_lock);
-       status = ocrdma_mbx_destroy_qp(dev, qp);
+       (void) ocrdma_mbx_destroy_qp(dev, qp);
 
        /*
         * acquire CQ lock while destroy is in progress, in order to
@@ -1670,7 +1773,7 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
        kfree(qp->wqe_wr_id_tbl);
        kfree(qp->rqe_wr_id_tbl);
        kfree(qp);
-       return status;
+       return 0;
 }
 
 static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
@@ -1831,6 +1934,8 @@ static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
        else
                ud_hdr->qkey = wr->wr.ud.remote_qkey;
        ud_hdr->rsvd_ahid = ah->id;
+       if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
+               hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
 }
 
 static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
@@ -2007,11 +2112,12 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
        u64 fbo;
        struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
        struct ocrdma_mr *mr;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
        u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
 
        wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
 
-       if (wr->wr.fast_reg.page_list_len > qp->dev->attr.max_pages_per_frmr)
+       if (wr->wr.fast_reg.page_list_len > dev->attr.max_pages_per_frmr)
                return -EINVAL;
 
        hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
@@ -2039,7 +2145,7 @@ static int ocrdma_build_fr(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
        fast_reg->size_sge =
                get_encoded_page_size(1 << wr->wr.fast_reg.page_shift);
        mr = (struct ocrdma_mr *) (unsigned long)
-               qp->dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
+               dev->stag_arr[(hdr->lkey >> 8) & (OCRDMA_MAX_STAG - 1)];
        build_frmr_pbes(wr, mr->hwmr.pbl_table, &mr->hwmr);
        return 0;
 }
@@ -2112,8 +2218,6 @@ int ocrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
                        status = ocrdma_build_write(qp, hdr, wr);
                        break;
-               case IB_WR_RDMA_READ_WITH_INV:
-                       hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
                case IB_WR_RDMA_READ:
                        ocrdma_build_read(qp, hdr, wr);
                        break;
@@ -2484,8 +2588,11 @@ static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
                                 bool *polled, bool *stop)
 {
        bool expand;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
        int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
                OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
+       if (status < OCRDMA_MAX_CQE_ERR)
+               atomic_inc(&dev->cqe_err_stats[status]);
 
        /* when hw sq is empty, but rq is not empty, so we continue
         * to keep the cqe in order to get the cq event again.
@@ -2604,6 +2711,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
                                int status)
 {
        bool expand;
+       struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
+
+       if (status < OCRDMA_MAX_CQE_ERR)
+               atomic_inc(&dev->cqe_err_stats[status]);
 
        /* when hw_rq is empty, but wq is not empty, so continue
         * to keep the cqe to get the cq event again.
index c00ae093b6f881870867b8dac16af33efca4091b..ffd48bfc4923457e5383345acfa3620fa5f6a52f 100644 (file)
@@ -1082,12 +1082,6 @@ struct qib_devdata {
        /* control high-level access to EEPROM */
        struct mutex eep_lock;
        uint64_t traffic_wds;
-       /* active time is kept in seconds, but logged in hours */
-       atomic_t active_time;
-       /* Below are nominal shadow of EEPROM, new since last EEPROM update */
-       uint8_t eep_st_errs[QIB_EEP_LOG_CNT];
-       uint8_t eep_st_new_errs[QIB_EEP_LOG_CNT];
-       uint16_t eep_hrs;
        /*
         * masks for which bits of errs, hwerrs that cause
         * each of the counters to increment.
@@ -1309,8 +1303,7 @@ int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr, void *buffer,
 int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
                    const void *buffer, int len);
 void qib_get_eeprom_info(struct qib_devdata *);
-int qib_update_eeprom_log(struct qib_devdata *dd);
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr);
+#define qib_inc_eeprom_err(dd, eidx, incr)
 void qib_dump_lookup_output_queue(struct qib_devdata *);
 void qib_force_pio_avail_update(struct qib_devdata *);
 void qib_clear_symerror_on_linkup(unsigned long opaque);
@@ -1467,11 +1460,14 @@ const char *qib_get_unit_name(int unit);
  * Flush write combining store buffers (if present) and perform a write
  * barrier.
  */
+static inline void qib_flush_wc(void)
+{
 #if defined(CONFIG_X86_64)
-#define qib_flush_wc() asm volatile("sfence" : : : "memory")
+       asm volatile("sfence" : : : "memory");
 #else
-#define qib_flush_wc() wmb() /* no reorder around wc flush */
+       wmb(); /* no reorder around wc flush */
 #endif
+}
 
 /* global module parameter variables */
 extern unsigned qib_ibmtu;
index 5670ace27c639adb351b9928c65ed081a599a910..4fb78abd8ba1ad69629a9752878d4aa30a4f2229 100644 (file)
@@ -257,7 +257,7 @@ struct qib_base_info {
 
        /* shared memory page for send buffer disarm status */
        __u64 spi_sendbuf_status;
-} __attribute__ ((aligned(8)));
+} __aligned(8);
 
 /*
  * This version number is given to the driver by the user code during
@@ -361,7 +361,7 @@ struct qib_user_info {
         */
        __u64 spu_base_info;
 
-} __attribute__ ((aligned(8)));
+} __aligned(8);
 
 /* User commands. */
 
index 6abd3ed3cd51ecf2c0a21927b0cf4894f7c48941..5e75b43c596b608adfacfeb48ea955ffa914082d 100644 (file)
@@ -255,7 +255,6 @@ void qib_dbg_ibdev_init(struct qib_ibdev *ibd)
        DEBUGFS_FILE_CREATE(opcode_stats);
        DEBUGFS_FILE_CREATE(ctx_stats);
        DEBUGFS_FILE_CREATE(qp_stats);
-       return;
 }
 
 void qib_dbg_ibdev_exit(struct qib_ibdev *ibd)
index 5dfda4c5cc9c3b1fde36d02c55cd387b7e366542..8c34b23e5bf670b92b3d1cec4d7b9c9e93ba9a20 100644 (file)
@@ -85,7 +85,7 @@ static struct qib_diag_client *get_client(struct qib_devdata *dd)
                client_pool = dc->next;
        else
                /* None in pool, alloc and init */
-               dc = kmalloc(sizeof *dc, GFP_KERNEL);
+               dc = kmalloc(sizeof(*dc), GFP_KERNEL);
 
        if (dc) {
                dc->next = NULL;
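
[Annotation] Most of the remaining qib hunks in this section are the same two checkpatch-driven cleanups. The first is shown above: sizeof is written with parentheses around its operand. Both spellings are valid C and generate identical code; the parenthesized form is simply the preferred kernel style, e.g.:

    struct client *dc;                              /* illustrative type */

    dc = kmalloc(sizeof *dc, GFP_KERNEL);           /* legal, but flagged by checkpatch */
    dc = kmalloc(sizeof(*dc), GFP_KERNEL);          /* preferred spelling, same result */
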
@@ -257,6 +257,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
        if (dd->userbase) {
                /* If user regs mapped, they are after send, so set limit. */
                u32 ulim = (dd->cfgctxts * dd->ureg_align) + dd->uregbase;
+
                if (!dd->piovl15base)
                        snd_lim = dd->uregbase;
                krb32 = (u32 __iomem *)dd->userbase;
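
[Annotation] The second recurring cleanup is the empty line added between a block's local declarations and its first statement ("Missing a blank line after declarations" in checkpatch terms), as in the hunk above. A self-contained illustration:

    static int example(int base, int span)
    {
            int limit = base + span;        /* declarations come first */

            return limit;                   /* first statement, after a blank line */
    }
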
@@ -280,6 +281,7 @@ static u32 __iomem *qib_remap_ioaddr32(struct qib_devdata *dd, u32 offset,
        snd_bottom = dd->pio2k_bufbase;
        if (snd_lim == 0) {
                u32 tot2k = dd->piobcnt2k * ALIGN(dd->piosize2k, dd->palign);
+
                snd_lim = snd_bottom + tot2k;
        }
        /* If 4k buffers exist, account for them by bumping
@@ -398,6 +400,7 @@ static int qib_write_umem64(struct qib_devdata *dd, u32 regoffs,
        /* not very efficient, but it works for now */
        while (reg_addr < reg_end) {
                u64 data;
+
                if (copy_from_user(&data, uaddr, sizeof(data))) {
                        ret = -EFAULT;
                        goto bail;
@@ -698,7 +701,7 @@ int qib_register_observer(struct qib_devdata *dd,
 
        if (!dd || !op)
                return -EINVAL;
-       olp = vmalloc(sizeof *olp);
+       olp = vmalloc(sizeof(*olp));
        if (!olp) {
                pr_err("vmalloc for observer failed\n");
                return -ENOMEM;
@@ -796,6 +799,7 @@ static ssize_t qib_diag_read(struct file *fp, char __user *data,
                op = diag_get_observer(dd, *off);
                if (op) {
                        u32 offset = *off;
+
                        ret = op->hook(dd, op, offset, &data64, 0, use_32);
                }
                /*
@@ -873,6 +877,7 @@ static ssize_t qib_diag_write(struct file *fp, const char __user *data,
                if (count == 4 || count == 8) {
                        u64 data64;
                        u32 offset = *off;
+
                        ret = copy_from_user(&data64, data, count);
                        if (ret) {
                                ret = -EFAULT;
index 5bee08f16d7438d2eba1668bdccee64265c9972d..f58fdc3d25a29a71909a22c772217c62a975b785 100644 (file)
@@ -86,7 +86,7 @@ const char *qib_get_unit_name(int unit)
 {
        static char iname[16];
 
-       snprintf(iname, sizeof iname, "infinipath%u", unit);
+       snprintf(iname, sizeof(iname), "infinipath%u", unit);
        return iname;
 }
 
@@ -349,6 +349,7 @@ static u32 qib_rcv_hdrerr(struct qib_ctxtdata *rcd, struct qib_pportdata *ppd,
                qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
                if (qp_num != QIB_MULTICAST_QPN) {
                        int ruc_res;
+
                        qp = qib_lookup_qpn(ibp, qp_num);
                        if (!qp)
                                goto drop;
@@ -461,6 +462,7 @@ u32 qib_kreceive(struct qib_ctxtdata *rcd, u32 *llic, u32 *npkts)
        rhf_addr = (__le32 *) rcd->rcvhdrq + l + dd->rhf_offset;
        if (dd->flags & QIB_NODMA_RTAIL) {
                u32 seq = qib_hdrget_seq(rhf_addr);
+
                if (seq != rcd->seq_cnt)
                        goto bail;
                hdrqtail = 0;
@@ -651,6 +653,7 @@ bail:
 int qib_set_lid(struct qib_pportdata *ppd, u32 lid, u8 lmc)
 {
        struct qib_devdata *dd = ppd->dd;
+
        ppd->lid = lid;
        ppd->lmc = lmc;
 
index 4d5d71aaa2b4e53e319b3c9cc57be37291f3cdd7..311ee6c3dd5e8b2e39382ef1c5d2741040a47636 100644 (file)
@@ -153,6 +153,7 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
 
        if (t && dd0->nguid > 1 && t <= dd0->nguid) {
                u8 oguid;
+
                dd->base_guid = dd0->base_guid;
                bguid = (u8 *) &dd->base_guid;
 
@@ -251,206 +252,25 @@ void qib_get_eeprom_info(struct qib_devdata *dd)
                 * This board has a Serial-prefix, which is stored
                 * elsewhere for backward-compatibility.
                 */
-               memcpy(snp, ifp->if_sprefix, sizeof ifp->if_sprefix);
-               snp[sizeof ifp->if_sprefix] = '\0';
+               memcpy(snp, ifp->if_sprefix, sizeof(ifp->if_sprefix));
+               snp[sizeof(ifp->if_sprefix)] = '\0';
                len = strlen(snp);
                snp += len;
-               len = (sizeof dd->serial) - len;
-               if (len > sizeof ifp->if_serial)
-                       len = sizeof ifp->if_serial;
+               len = sizeof(dd->serial) - len;
+               if (len > sizeof(ifp->if_serial))
+                       len = sizeof(ifp->if_serial);
                memcpy(snp, ifp->if_serial, len);
-       } else
-               memcpy(dd->serial, ifp->if_serial,
-                      sizeof ifp->if_serial);
+       } else {
+               memcpy(dd->serial, ifp->if_serial, sizeof(ifp->if_serial));
+       }
        if (!strstr(ifp->if_comment, "Tested successfully"))
                qib_dev_err(dd,
                        "Board SN %s did not pass functional test: %s\n",
                        dd->serial, ifp->if_comment);
 
-       memcpy(&dd->eep_st_errs, &ifp->if_errcntp, QIB_EEP_LOG_CNT);
-       /*
-        * Power-on (actually "active") hours are kept as little-endian value
-        * in EEPROM, but as seconds in a (possibly as small as 24-bit)
-        * atomic_t while running.
-        */
-       atomic_set(&dd->active_time, 0);
-       dd->eep_hrs = ifp->if_powerhour[0] | (ifp->if_powerhour[1] << 8);
-
 done:
        vfree(buf);
 
 bail:;
 }
 
-/**
- * qib_update_eeprom_log - copy active-time and error counters to eeprom
- * @dd: the qlogic_ib device
- *
- * Although the time is kept as seconds in the qib_devdata struct, it is
- * rounded to hours for re-write, as we have only 16 bits in EEPROM.
- * First-cut code reads whole (expected) struct qib_flash, modifies,
- * re-writes. Future direction: read/write only what we need, assuming
- * that the EEPROM had to have been "good enough" for driver init, and
- * if not, we aren't making it worse.
- *
- */
-int qib_update_eeprom_log(struct qib_devdata *dd)
-{
-       void *buf;
-       struct qib_flash *ifp;
-       int len, hi_water;
-       uint32_t new_time, new_hrs;
-       u8 csum;
-       int ret, idx;
-       unsigned long flags;
-
-       /* first, check if we actually need to do anything. */
-       ret = 0;
-       for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-               if (dd->eep_st_new_errs[idx]) {
-                       ret = 1;
-                       break;
-               }
-       }
-       new_time = atomic_read(&dd->active_time);
-
-       if (ret == 0 && new_time < 3600)
-               goto bail;
-
-       /*
-        * The quick-check above determined that there is something worthy
-        * of logging, so get current contents and do a more detailed idea.
-        * read full flash, not just currently used part, since it may have
-        * been written with a newer definition
-        */
-       len = sizeof(struct qib_flash);
-       buf = vmalloc(len);
-       ret = 1;
-       if (!buf) {
-               qib_dev_err(dd,
-                       "Couldn't allocate memory to read %u bytes from eeprom for logging\n",
-                       len);
-               goto bail;
-       }
-
-       /* Grab semaphore and read current EEPROM. If we get an
-        * error, let go, but if not, keep it until we finish write.
-        */
-       ret = mutex_lock_interruptible(&dd->eep_lock);
-       if (ret) {
-               qib_dev_err(dd, "Unable to acquire EEPROM for logging\n");
-               goto free_bail;
-       }
-       ret = qib_twsi_blk_rd(dd, dd->twsi_eeprom_dev, 0, buf, len);
-       if (ret) {
-               mutex_unlock(&dd->eep_lock);
-               qib_dev_err(dd, "Unable read EEPROM for logging\n");
-               goto free_bail;
-       }
-       ifp = (struct qib_flash *)buf;
-
-       csum = flash_csum(ifp, 0);
-       if (csum != ifp->if_csum) {
-               mutex_unlock(&dd->eep_lock);
-               qib_dev_err(dd, "EEPROM cks err (0x%02X, S/B 0x%02X)\n",
-                           csum, ifp->if_csum);
-               ret = 1;
-               goto free_bail;
-       }
-       hi_water = 0;
-       spin_lock_irqsave(&dd->eep_st_lock, flags);
-       for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-               int new_val = dd->eep_st_new_errs[idx];
-               if (new_val) {
-                       /*
-                        * If we have seen any errors, add to EEPROM values
-                        * We need to saturate at 0xFF (255) and we also
-                        * would need to adjust the checksum if we were
-                        * trying to minimize EEPROM traffic
-                        * Note that we add to actual current count in EEPROM,
-                        * in case it was altered while we were running.
-                        */
-                       new_val += ifp->if_errcntp[idx];
-                       if (new_val > 0xFF)
-                               new_val = 0xFF;
-                       if (ifp->if_errcntp[idx] != new_val) {
-                               ifp->if_errcntp[idx] = new_val;
-                               hi_water = offsetof(struct qib_flash,
-                                                   if_errcntp) + idx;
-                       }
-                       /*
-                        * update our shadow (used to minimize EEPROM
-                        * traffic), to match what we are about to write.
-                        */
-                       dd->eep_st_errs[idx] = new_val;
-                       dd->eep_st_new_errs[idx] = 0;
-               }
-       }
-       /*
-        * Now update active-time. We would like to round to the nearest hour
-        * but unless atomic_t are sure to be proper signed ints we cannot,
-        * because we need to account for what we "transfer" to EEPROM and
-        * if we log an hour at 31 minutes, then we would need to set
-        * active_time to -29 to accurately count the _next_ hour.
-        */
-       if (new_time >= 3600) {
-               new_hrs = new_time / 3600;
-               atomic_sub((new_hrs * 3600), &dd->active_time);
-               new_hrs += dd->eep_hrs;
-               if (new_hrs > 0xFFFF)
-                       new_hrs = 0xFFFF;
-               dd->eep_hrs = new_hrs;
-               if ((new_hrs & 0xFF) != ifp->if_powerhour[0]) {
-                       ifp->if_powerhour[0] = new_hrs & 0xFF;
-                       hi_water = offsetof(struct qib_flash, if_powerhour);
-               }
-               if ((new_hrs >> 8) != ifp->if_powerhour[1]) {
-                       ifp->if_powerhour[1] = new_hrs >> 8;
-                       hi_water = offsetof(struct qib_flash, if_powerhour) + 1;
-               }
-       }
-       /*
-        * There is a tiny possibility that we could somehow fail to write
-        * the EEPROM after updating our shadows, but problems from holding
-        * the spinlock too long are a much bigger issue.
-        */
-       spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-       if (hi_water) {
-               /* we made some change to the data, uopdate cksum and write */
-               csum = flash_csum(ifp, 1);
-               ret = eeprom_write_with_enable(dd, 0, buf, hi_water + 1);
-       }
-       mutex_unlock(&dd->eep_lock);
-       if (ret)
-               qib_dev_err(dd, "Failed updating EEPROM\n");
-
-free_bail:
-       vfree(buf);
-bail:
-       return ret;
-}
-
-/**
- * qib_inc_eeprom_err - increment one of the four error counters
- * that are logged to EEPROM.
- * @dd: the qlogic_ib device
- * @eidx: 0..3, the counter to increment
- * @incr: how much to add
- *
- * Each counter is 8-bits, and saturates at 255 (0xFF). They
- * are copied to the EEPROM (aka flash) whenever qib_update_eeprom_log()
- * is called, but it can only be called in a context that allows sleep.
- * This function can be called even at interrupt level.
- */
-void qib_inc_eeprom_err(struct qib_devdata *dd, u32 eidx, u32 incr)
-{
-       uint new_val;
-       unsigned long flags;
-
-       spin_lock_irqsave(&dd->eep_st_lock, flags);
-       new_val = dd->eep_st_new_errs[eidx] + incr;
-       if (new_val > 255)
-               new_val = 255;
-       dd->eep_st_new_errs[eidx] = new_val;
-       spin_unlock_irqrestore(&dd->eep_st_lock, flags);
-}
index b15e34eeef685d510c781d25d08433e4b3e7c717..41937c6f888af13deadb6c7b25678cfc34596cf8 100644 (file)
@@ -351,9 +351,10 @@ static int qib_tid_update(struct qib_ctxtdata *rcd, struct file *fp,
                 * unless perhaps the user has mpin'ed the pages
                 * themselves.
                 */
-               qib_devinfo(dd->pcidev,
-                        "Failed to lock addr %p, %u pages: "
-                        "errno %d\n", (void *) vaddr, cnt, -ret);
+               qib_devinfo(
+                       dd->pcidev,
+                       "Failed to lock addr %p, %u pages: errno %d\n",
+                       (void *) vaddr, cnt, -ret);
                goto done;
        }
        for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
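
[Annotation] The message rewrites in this file follow the kernel guideline that user-visible strings stay on a single source line even when that makes the line long, so the whole message can be found with grep. A sketch of the before/after shape, using the generic pr_info() rather than the driver's own helper (vaddr, cnt and ret as in the hunk above):

    pr_info("Failed to lock addr %p, %u pages: "
            "errno %d\n", vaddr, cnt, -ret);        /* split string: hard to grep */

    pr_info("Failed to lock addr %p, %u pages: errno %d\n",
            vaddr, cnt, -ret);                      /* whole message on one line */
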
@@ -437,7 +438,7 @@ cleanup:
                        goto cleanup;
                }
                if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
-                                tidmap, sizeof tidmap)) {
+                                tidmap, sizeof(tidmap))) {
                        ret = -EFAULT;
                        goto cleanup;
                }
@@ -484,7 +485,7 @@ static int qib_tid_free(struct qib_ctxtdata *rcd, unsigned subctxt,
        }
 
        if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
-                          sizeof tidmap)) {
+                          sizeof(tidmap))) {
                ret = -EFAULT;
                goto done;
        }
@@ -951,8 +952,8 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
                /* rcvegrbufs are read-only on the slave */
                if (vma->vm_flags & VM_WRITE) {
                        qib_devinfo(dd->pcidev,
-                                "Can't map eager buffers as "
-                                "writable (flags=%lx)\n", vma->vm_flags);
+                                "Can't map eager buffers as writable (flags=%lx)\n",
+                                vma->vm_flags);
                        ret = -EPERM;
                        goto bail;
                }
@@ -1185,6 +1186,7 @@ static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
         */
        if (weight >= qib_cpulist_count) {
                int cpu;
+
                cpu = find_first_zero_bit(qib_cpulist,
                                          qib_cpulist_count);
                if (cpu == qib_cpulist_count)
@@ -1247,10 +1249,7 @@ static int init_subctxts(struct qib_devdata *dd,
        if (!qib_compatible_subctxts(uinfo->spu_userversion >> 16,
                uinfo->spu_userversion & 0xffff)) {
                qib_devinfo(dd->pcidev,
-                        "Mismatched user version (%d.%d) and driver "
-                        "version (%d.%d) while context sharing. Ensure "
-                        "that driver and library are from the same "
-                        "release.\n",
+                        "Mismatched user version (%d.%d) and driver version (%d.%d) while context sharing. Ensure that driver and library are from the same release.\n",
                         (int) (uinfo->spu_userversion >> 16),
                         (int) (uinfo->spu_userversion & 0xffff),
                         QIB_USER_SWMAJOR, QIB_USER_SWMINOR);
@@ -1391,6 +1390,7 @@ static int choose_port_ctxt(struct file *fp, struct qib_devdata *dd, u32 port,
        }
        if (!ppd) {
                u32 pidx = ctxt % dd->num_pports;
+
                if (usable(dd->pport + pidx))
                        ppd = dd->pport + pidx;
                else {
@@ -1438,10 +1438,12 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
 
        if (alg == QIB_PORT_ALG_ACROSS) {
                unsigned inuse = ~0U;
+
                /* find device (with ACTIVE ports) with fewest ctxts in use */
                for (ndev = 0; ndev < devmax; ndev++) {
                        struct qib_devdata *dd = qib_lookup(ndev);
                        unsigned cused = 0, cfree = 0, pusable = 0;
+
                        if (!dd)
                                continue;
                        if (port && port <= dd->num_pports &&
@@ -1471,6 +1473,7 @@ static int get_a_ctxt(struct file *fp, const struct qib_user_info *uinfo,
        } else {
                for (ndev = 0; ndev < devmax; ndev++) {
                        struct qib_devdata *dd = qib_lookup(ndev);
+
                        if (dd) {
                                ret = choose_port_ctxt(fp, dd, port, uinfo);
                                if (!ret)
@@ -1556,6 +1559,7 @@ static int find_hca(unsigned int cpu, int *unit)
        }
        for (ndev = 0; ndev < devmax; ndev++) {
                struct qib_devdata *dd = qib_lookup(ndev);
+
                if (dd) {
                        if (pcibus_to_node(dd->pcidev->bus) < 0) {
                                ret = -EINVAL;
index 81854586c081fee37e0b3933a26d08e5e8b4e815..650897a8591e872f338d03994da3ed18e5a51bf7 100644 (file)
@@ -106,7 +106,7 @@ static ssize_t driver_stats_read(struct file *file, char __user *buf,
 {
        qib_stats.sps_ints = qib_sps_ints();
        return simple_read_from_buffer(buf, count, ppos, &qib_stats,
-                                      sizeof qib_stats);
+                                      sizeof(qib_stats));
 }
 
 /*
@@ -133,7 +133,7 @@ static ssize_t driver_names_read(struct file *file, char __user *buf,
                                 size_t count, loff_t *ppos)
 {
        return simple_read_from_buffer(buf, count, ppos, qib_statnames,
-               sizeof qib_statnames - 1); /* no null */
+               sizeof(qib_statnames) - 1); /* no null */
 }
 
 static const struct file_operations driver_ops[] = {
@@ -379,7 +379,7 @@ static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd)
        int ret, i;
 
        /* create the per-unit directory */
-       snprintf(unit, sizeof unit, "%u", dd->unit);
+       snprintf(unit, sizeof(unit), "%u", dd->unit);
        ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir,
                          &simple_dir_operations, dd);
        if (ret) {
@@ -455,7 +455,7 @@ static int remove_file(struct dentry *parent, char *name)
        }
 
        spin_lock(&tmp->d_lock);
-       if (!(d_unhashed(tmp) && tmp->d_inode)) {
+       if (!d_unhashed(tmp) && tmp->d_inode) {
                __d_drop(tmp);
                spin_unlock(&tmp->d_lock);
                simple_unlink(parent->d_inode, tmp);
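
[Annotation] Note that the remove_file() change just above is not purely cosmetic: !(d_unhashed(tmp) && tmp->d_inode) and !d_unhashed(tmp) && tmp->d_inode are different predicates. The old test fired whenever the dentry was unhashed or had no inode; the new one fires only when the dentry is still hashed and does have an inode, the case in which __d_drop() and simple_unlink() actually have work to do. Writing A for d_unhashed(tmp) and B for (tmp->d_inode != NULL):

    A B | !(A && B) | !A && B
    0 0 |     1     |    0
    0 1 |     1     |    1
    1 0 |     1     |    0
    1 1 |     0     |    0

The two expressions differ exactly when B is false, i.e. for a dentry with no inode: the old test was true there, the new one is false, so the drop/unlink path is now skipped for negative dentries.
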
@@ -482,7 +482,7 @@ static int remove_device_files(struct super_block *sb,
 
        root = dget(sb->s_root);
        mutex_lock(&root->d_inode->i_mutex);
-       snprintf(unit, sizeof unit, "%u", dd->unit);
+       snprintf(unit, sizeof(unit), "%u", dd->unit);
        dir = lookup_one_len(unit, root, strlen(unit));
 
        if (IS_ERR(dir)) {
@@ -560,6 +560,7 @@ static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags,
                        const char *dev_name, void *data)
 {
        struct dentry *ret;
+
        ret = mount_single(fs_type, flags, data, qibfs_fill_super);
        if (!IS_ERR(ret))
                qib_super = ret->d_sb;
index d68266ac7619b896c49e500256c263ec1c3619c1..0d2ba59af30af66bce01ef8132c8182cc6e44a33 100644 (file)
@@ -333,6 +333,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
                                  enum qib_ureg regno, u64 value, int ctxt)
 {
        u64 __iomem *ubase;
+
        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
@@ -834,14 +835,14 @@ static void qib_handle_6120_hwerrors(struct qib_devdata *dd, char *msg,
                bits = (u32) ((hwerrs >>
                               QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
                              QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PCIe Mem Parity Errs %x] ", bits);
                strlcat(msg, bitsmsg, msgl);
        }
 
        if (hwerrs & _QIB_PLL_FAIL) {
                isfatal = 1;
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PLL failed (%llx), InfiniPath hardware unusable]",
                         (unsigned long long) hwerrs & _QIB_PLL_FAIL);
                strlcat(msg, bitsmsg, msgl);
@@ -1014,7 +1015,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
 
        /* do these first, they are most important */
        if (errs & ERR_MASK(HardwareErr))
-               qib_handle_6120_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+               qib_handle_6120_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
        else
                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1062,7 +1063,7 @@ static void handle_6120_errors(struct qib_devdata *dd, u64 errs)
         */
        mask = ERR_MASK(IBStatusChanged) | ERR_MASK(RcvEgrFullErr) |
                ERR_MASK(RcvHdrFullErr) | ERR_MASK(HardwareErr);
-       qib_decode_6120_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+       qib_decode_6120_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
 
        if (errs & E_SUM_PKTERRS)
                qib_stats.sps_rcverrs++;
@@ -1670,6 +1671,7 @@ static irqreturn_t qib_6120intr(int irq, void *data)
                }
                if (crcs) {
                        u32 cntr = dd->cspec->lli_counter;
+
                        cntr += crcs;
                        if (cntr) {
                                if (cntr > dd->cspec->lli_thresh) {
@@ -1722,6 +1724,7 @@ static void qib_setup_6120_interrupt(struct qib_devdata *dd)
                        "irq is 0, BIOS error?  Interrupts won't work\n");
        else {
                int ret;
+
                ret = request_irq(dd->cspec->irq, qib_6120intr, 0,
                                  QIB_DRV_NAME, dd);
                if (ret)
@@ -2681,8 +2684,6 @@ static void qib_get_6120_faststats(unsigned long opaque)
        spin_lock_irqsave(&dd->eep_st_lock, flags);
        traffic_wds -= dd->traffic_wds;
        dd->traffic_wds += traffic_wds;
-       if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-               atomic_add(5, &dd->active_time); /* S/B #define */
        spin_unlock_irqrestore(&dd->eep_st_lock, flags);
 
        qib_chk_6120_errormask(dd);
@@ -2929,6 +2930,7 @@ bail:
 static int qib_6120_set_loopback(struct qib_pportdata *ppd, const char *what)
 {
        int ret = 0;
+
        if (!strncmp(what, "ibc", 3)) {
                ppd->dd->cspec->ibcctrl |= SYM_MASK(IBCCtrl, Loopback);
                qib_devinfo(ppd->dd->pcidev, "Enabling IB%u:%u IBC loopback\n",
@@ -3170,6 +3172,7 @@ static void get_6120_chip_params(struct qib_devdata *dd)
 static void set_6120_baseaddrs(struct qib_devdata *dd)
 {
        u32 cregbase;
+
        cregbase = qib_read_kreg32(dd, kr_counterregbase);
        dd->cspec->cregbase = (u64 __iomem *)
                ((char __iomem *) dd->kregbase + cregbase);
index 7dec89fdc1248dc69c3cab38069e5e2f32986ce3..22affda8af88eacbd11f21abab55ba299dfb0e0b 100644 (file)
@@ -902,7 +902,8 @@ static void sdma_7220_errors(struct qib_pportdata *ppd, u64 errs)
        errs &= QLOGIC_IB_E_SDMAERRS;
 
        msg = dd->cspec->sdmamsgbuf;
-       qib_decode_7220_sdma_errs(ppd, errs, msg, sizeof dd->cspec->sdmamsgbuf);
+       qib_decode_7220_sdma_errs(ppd, errs, msg,
+               sizeof(dd->cspec->sdmamsgbuf));
        spin_lock_irqsave(&ppd->sdma_lock, flags);
 
        if (errs & ERR_MASK(SendBufMisuseErr)) {
@@ -1043,6 +1044,7 @@ done:
 static void reenable_7220_chase(unsigned long opaque)
 {
        struct qib_pportdata *ppd = (struct qib_pportdata *)opaque;
+
        ppd->cpspec->chase_timer.expires = 0;
        qib_set_ib_7220_lstate(ppd, QLOGIC_IB_IBCC_LINKCMD_DOWN,
                QLOGIC_IB_IBCC_LINKINITCMD_POLL);
@@ -1101,7 +1103,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
 
        /* do these first, they are most important */
        if (errs & ERR_MASK(HardwareErr))
-               qib_7220_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+               qib_7220_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
        else
                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1155,7 +1157,7 @@ static void handle_7220_errors(struct qib_devdata *dd, u64 errs)
                ERR_MASK(RcvEgrFullErr) | ERR_MASK(RcvHdrFullErr) |
                ERR_MASK(HardwareErr) | ERR_MASK(SDmaDisabledErr);
 
-       qib_decode_7220_err(dd, msg, sizeof dd->cspec->emsgbuf, errs & ~mask);
+       qib_decode_7220_err(dd, msg, sizeof(dd->cspec->emsgbuf), errs & ~mask);
 
        if (errs & E_SUM_PKTERRS)
                qib_stats.sps_rcverrs++;
@@ -1380,7 +1382,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
                bits = (u32) ((hwerrs >>
                               QLOGIC_IB_HWE_PCIEMEMPARITYERR_SHIFT) &
                              QLOGIC_IB_HWE_PCIEMEMPARITYERR_MASK);
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PCIe Mem Parity Errs %x] ", bits);
                strlcat(msg, bitsmsg, msgl);
        }
@@ -1390,7 +1392,7 @@ static void qib_7220_handle_hwerrors(struct qib_devdata *dd, char *msg,
 
        if (hwerrs & _QIB_PLL_FAIL) {
                isfatal = 1;
-               snprintf(bitsmsg, sizeof dd->cspec->bitsmsgbuf,
+               snprintf(bitsmsg, sizeof(dd->cspec->bitsmsgbuf),
                         "[PLL failed (%llx), InfiniPath hardware unusable]",
                         (unsigned long long) hwerrs & _QIB_PLL_FAIL);
                strlcat(msg, bitsmsg, msgl);
@@ -3297,8 +3299,6 @@ static void qib_get_7220_faststats(unsigned long opaque)
        spin_lock_irqsave(&dd->eep_st_lock, flags);
        traffic_wds -= dd->traffic_wds;
        dd->traffic_wds += traffic_wds;
-       if (traffic_wds  >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-               atomic_add(5, &dd->active_time); /* S/B #define */
        spin_unlock_irqrestore(&dd->eep_st_lock, flags);
 done:
        mod_timer(&dd->stats_timer, jiffies + HZ * ACTIVITY_TIMER);
index a7eb32517a04bced55f9d1d5720b5d060d96e92b..ef97b71c8f7dd713a77401f593c6a10320a046e6 100644 (file)
@@ -117,7 +117,7 @@ MODULE_PARM_DESC(chase, "Enable state chase handling");
 
 static ushort qib_long_atten = 10; /* 10 dB ~= 5m length */
 module_param_named(long_attenuation, qib_long_atten, ushort, S_IRUGO);
-MODULE_PARM_DESC(long_attenuation, \
+MODULE_PARM_DESC(long_attenuation,
                 "attenuation cutoff (dB) for long copper cable setup");
 
 static ushort qib_singleport;
@@ -153,11 +153,12 @@ static struct kparam_string kp_txselect = {
 static int  setup_txselect(const char *, struct kernel_param *);
 module_param_call(txselect, setup_txselect, param_get_string,
                  &kp_txselect, S_IWUSR | S_IRUGO);
-MODULE_PARM_DESC(txselect, \
+MODULE_PARM_DESC(txselect,
                 "Tx serdes indices (for no QSFP or invalid QSFP data)");
 
 #define BOARD_QME7342 5
 #define BOARD_QMH7342 6
+#define BOARD_QMH7360 9
 #define IS_QMH(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
                    BOARD_QMH7342)
 #define IS_QME(dd) (SYM_FIELD((dd)->revision, Revision, BoardID) == \
@@ -817,6 +818,7 @@ static inline void qib_write_ureg(const struct qib_devdata *dd,
                                  enum qib_ureg regno, u64 value, int ctxt)
 {
        u64 __iomem *ubase;
+
        if (dd->userbase)
                ubase = (u64 __iomem *)
                        ((char __iomem *) dd->userbase +
@@ -1677,7 +1679,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
        /* do these first, they are most important */
        if (errs & QIB_E_HARDWARE) {
                *msg = '\0';
-               qib_7322_handle_hwerrors(dd, msg, sizeof dd->cspec->emsgbuf);
+               qib_7322_handle_hwerrors(dd, msg, sizeof(dd->cspec->emsgbuf));
        } else
                for (log_idx = 0; log_idx < QIB_EEP_LOG_CNT; ++log_idx)
                        if (errs & dd->eep_st_masks[log_idx].errs_to_log)
@@ -1702,7 +1704,7 @@ static noinline void handle_7322_errors(struct qib_devdata *dd)
        mask = QIB_E_HARDWARE;
        *msg = '\0';
 
-       err_decode(msg, sizeof dd->cspec->emsgbuf, errs & ~mask,
+       err_decode(msg, sizeof(dd->cspec->emsgbuf), errs & ~mask,
                   qib_7322error_msgs);
 
        /*
@@ -1889,10 +1891,10 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
        *msg = '\0';
 
        if (errs & ~QIB_E_P_BITSEXTANT) {
-               err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+               err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
                           errs & ~QIB_E_P_BITSEXTANT, qib_7322p_error_msgs);
                if (!*msg)
-                       snprintf(msg, sizeof ppd->cpspec->epmsgbuf,
+                       snprintf(msg, sizeof(ppd->cpspec->epmsgbuf),
                                 "no others");
                qib_dev_porterr(dd, ppd->port,
                        "error interrupt with unknown errors 0x%016Lx set (and %s)\n",
@@ -1906,7 +1908,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
                /* determine cause, then write to clear */
                symptom = qib_read_kreg_port(ppd, krp_sendhdrsymptom);
                qib_write_kreg_port(ppd, krp_sendhdrsymptom, 0);
-               err_decode(msg, sizeof ppd->cpspec->epmsgbuf, symptom,
+               err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), symptom,
                           hdrchk_msgs);
                *msg = '\0';
                /* senderrbuf cleared in SPKTERRS below */
@@ -1922,7 +1924,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
                         * isn't valid.  We don't want to confuse people, so
                         * we just don't print them, except at debug
                         */
-                       err_decode(msg, sizeof ppd->cpspec->epmsgbuf,
+                       err_decode(msg, sizeof(ppd->cpspec->epmsgbuf),
                                   (errs & QIB_E_P_LINK_PKTERRS),
                                   qib_7322p_error_msgs);
                        *msg = '\0';
@@ -1938,7 +1940,7 @@ static noinline void handle_7322_p_errors(struct qib_pportdata *ppd)
                 * valid.  We don't want to confuse people, so we just
                 * don't print them, except at debug
                 */
-               err_decode(msg, sizeof ppd->cpspec->epmsgbuf, errs,
+               err_decode(msg, sizeof(ppd->cpspec->epmsgbuf), errs,
                           qib_7322p_error_msgs);
                ignore_this_time = errs & QIB_E_P_LINK_PKTERRS;
                *msg = '\0';
@@ -2031,6 +2033,7 @@ static void qib_7322_set_intr_state(struct qib_devdata *dd, u32 enable)
                if (dd->cspec->num_msix_entries) {
                        /* and same for MSIx */
                        u64 val = qib_read_kreg64(dd, kr_intgranted);
+
                        if (val)
                                qib_write_kreg(dd, kr_intgranted, val);
                }
@@ -2176,6 +2179,7 @@ static void qib_7322_handle_hwerrors(struct qib_devdata *dd, char *msg,
                int err;
                unsigned long flags;
                struct qib_pportdata *ppd = dd->pport;
+
                for (; pidx < dd->num_pports; ++pidx, ppd++) {
                        err = 0;
                        if (pidx == 0 && (hwerrs &
@@ -2801,9 +2805,11 @@ static void qib_irq_notifier_notify(struct irq_affinity_notify *notify,
 
        if (n->rcv) {
                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
+
                qib_update_rhdrq_dca(rcd, cpu);
        } else {
                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
+
                qib_update_sdma_dca(ppd, cpu);
        }
 }
@@ -2816,9 +2822,11 @@ static void qib_irq_notifier_release(struct kref *ref)
 
        if (n->rcv) {
                struct qib_ctxtdata *rcd = (struct qib_ctxtdata *)n->arg;
+
                dd = rcd->dd;
        } else {
                struct qib_pportdata *ppd = (struct qib_pportdata *)n->arg;
+
                dd = ppd->dd;
        }
        qib_devinfo(dd->pcidev,
@@ -2994,6 +3002,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
                struct qib_pportdata *ppd;
                struct qib_qsfp_data *qd;
                u32 mask;
+
                if (!dd->pport[pidx].link_speed_supported)
                        continue;
                mask = QSFP_GPIO_MOD_PRS_N;
@@ -3001,6 +3010,7 @@ static noinline void unknown_7322_gpio_intr(struct qib_devdata *dd)
                mask <<= (QSFP_GPIO_PORT2_SHIFT * ppd->hw_pidx);
                if (gpiostatus & dd->cspec->gpio_mask & mask) {
                        u64 pins;
+
                        qd = &ppd->cpspec->qsfp_data;
                        gpiostatus &= ~mask;
                        pins = qib_read_kreg64(dd, kr_extstatus);
@@ -3442,7 +3452,7 @@ try_intx:
        }
 
        /* Try to get MSIx interrupts */
-       memset(redirect, 0, sizeof redirect);
+       memset(redirect, 0, sizeof(redirect));
        mask = ~0ULL;
        msixnum = 0;
        local_mask = cpumask_of_pcibus(dd->pcidev->bus);
@@ -3617,6 +3627,10 @@ static unsigned qib_7322_boardname(struct qib_devdata *dd)
                n = "InfiniPath_QME7362";
                dd->flags |= QIB_HAS_QSFP;
                break;
+       case BOARD_QMH7360:
+               n = "Intel IB QDR 1P FLR-QSFP Adptr";
+               dd->flags |= QIB_HAS_QSFP;
+               break;
        case 15:
                n = "InfiniPath_QLE7342_TEST";
                dd->flags |= QIB_HAS_QSFP;
@@ -3694,6 +3708,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
         */
        for (i = 0; i < msix_entries; i++) {
                u64 vecaddr, vecdata;
+
                vecaddr = qib_read_kreg64(dd, 2 * i +
                                  (QIB_7322_MsixTable_OFFS / sizeof(u64)));
                vecdata = qib_read_kreg64(dd, 1 + 2 * i +
@@ -5178,8 +5193,6 @@ static void qib_get_7322_faststats(unsigned long opaque)
                spin_lock_irqsave(&ppd->dd->eep_st_lock, flags);
                traffic_wds -= ppd->dd->traffic_wds;
                ppd->dd->traffic_wds += traffic_wds;
-               if (traffic_wds >= QIB_TRAFFIC_ACTIVE_THRESHOLD)
-                       atomic_add(ACTIVITY_TIMER, &ppd->dd->active_time);
                spin_unlock_irqrestore(&ppd->dd->eep_st_lock, flags);
                if (ppd->cpspec->qdr_dfe_on && (ppd->link_speed_active &
                                                QIB_IB_QDR) &&
@@ -5357,6 +5370,7 @@ static void qib_autoneg_7322_send(struct qib_pportdata *ppd, int which)
 static void set_7322_ibspeed_fast(struct qib_pportdata *ppd, u32 speed)
 {
        u64 newctrlb;
+
        newctrlb = ppd->cpspec->ibcctrl_b & ~(IBA7322_IBC_SPEED_MASK |
                                    IBA7322_IBC_IBTA_1_2_MASK |
                                    IBA7322_IBC_MAX_SPEED_MASK);
@@ -5843,6 +5857,7 @@ static void get_7322_chip_params(struct qib_devdata *dd)
 static void qib_7322_set_baseaddrs(struct qib_devdata *dd)
 {
        u32 cregbase;
+
        cregbase = qib_read_kreg32(dd, kr_counterregbase);
 
        dd->cspec->cregbase = (u64 __iomem *)(cregbase +
@@ -6183,6 +6198,7 @@ static int setup_txselect(const char *str, struct kernel_param *kp)
        struct qib_devdata *dd;
        unsigned long val;
        char *n;
+
        if (strlen(str) >= MAX_ATTEN_LEN) {
                pr_info("txselect_values string too long\n");
                return -ENOSPC;
@@ -6393,6 +6409,7 @@ static void write_7322_initregs(struct qib_devdata *dd)
        val = TIDFLOW_ERRBITS; /* these are W1C */
        for (i = 0; i < dd->cfgctxts; i++) {
                int flow;
+
                for (flow = 0; flow < NUM_TIDFLOWS_CTXT; flow++)
                        qib_write_ureg(dd, ur_rcvflowtable+flow, val, i);
        }
@@ -6503,6 +6520,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
 
        for (pidx = 0; pidx < NUM_IB_PORTS; ++pidx) {
                struct qib_chippport_specific *cp = ppd->cpspec;
+
                ppd->link_speed_supported = features & PORT_SPD_CAP;
                features >>=  PORT_SPD_CAP_SHIFT;
                if (!ppd->link_speed_supported) {
@@ -6581,8 +6599,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
                                ppd->vls_supported = IB_VL_VL0_7;
                        else {
                                qib_devinfo(dd->pcidev,
-                                           "Invalid num_vls %u for MTU %d "
-                                           ", using 4 VLs\n",
+                                           "Invalid num_vls %u for MTU %d, using 4 VLs\n",
                                            qib_num_cfg_vls, mtu);
                                ppd->vls_supported = IB_VL_VL0_3;
                                qib_num_cfg_vls = 4;
@@ -7890,6 +7907,7 @@ static void serdes_7322_los_enable(struct qib_pportdata *ppd, int enable)
 static int serdes_7322_init(struct qib_pportdata *ppd)
 {
        int ret = 0;
+
        if (ppd->dd->cspec->r1)
                ret = serdes_7322_init_old(ppd);
        else
@@ -8305,8 +8323,8 @@ static void force_h1(struct qib_pportdata *ppd)
 
 static int qib_r_grab(struct qib_devdata *dd)
 {
-       u64 val;
-       val = SJA_EN;
+       u64 val = SJA_EN;
+
        qib_write_kreg(dd, kr_r_access, val);
        qib_read_kreg32(dd, kr_scratch);
        return 0;
@@ -8319,6 +8337,7 @@ static int qib_r_wait_for_rdy(struct qib_devdata *dd)
 {
        u64 val;
        int timeout;
+
        for (timeout = 0; timeout < 100 ; ++timeout) {
                val = qib_read_kreg32(dd, kr_r_access);
                if (val & R_RDY)
@@ -8346,6 +8365,7 @@ static int qib_r_shift(struct qib_devdata *dd, int bisten,
                }
                if (inp) {
                        int tdi = inp[pos >> 3] >> (pos & 7);
+
                        val |= ((tdi & 1) << R_TDI_LSB);
                }
                qib_write_kreg(dd, kr_r_access, val);
index 729da39c49ed3fac472db472c3ac58b7766f8ada..2ee36953e234c46ff704bc6e5dfbed8339c09974 100644 (file)
@@ -140,7 +140,7 @@ int qib_create_ctxts(struct qib_devdata *dd)
         * Allocate full ctxtcnt array, rather than just cfgctxts, because
         * cleanup iterates across all possible ctxts.
         */
-       dd->rcd = kzalloc(sizeof(*dd->rcd) * dd->ctxtcnt, GFP_KERNEL);
+       dd->rcd = kcalloc(dd->ctxtcnt, sizeof(*dd->rcd), GFP_KERNEL);
        if (!dd->rcd) {
                qib_dev_err(dd,
                        "Unable to allocate ctxtdata array, failing\n");
@@ -234,6 +234,7 @@ int qib_init_pportdata(struct qib_pportdata *ppd, struct qib_devdata *dd,
                        u8 hw_pidx, u8 port)
 {
        int size;
+
        ppd->dd = dd;
        ppd->hw_pidx = hw_pidx;
        ppd->port = port; /* IB port number, not index */
@@ -613,6 +614,7 @@ static int qib_create_workqueues(struct qib_devdata *dd)
                ppd = dd->pport + pidx;
                if (!ppd->qib_wq) {
                        char wq_name[8]; /* 3 + 2 + 1 + 1 + 1 */
+
                        snprintf(wq_name, sizeof(wq_name), "qib%d_%d",
                                dd->unit, pidx);
                        ppd->qib_wq =
@@ -714,6 +716,7 @@ int qib_init(struct qib_devdata *dd, int reinit)
 
        for (pidx = 0; pidx < dd->num_pports; ++pidx) {
                int mtu;
+
                if (lastfail)
                        ret = lastfail;
                ppd = dd->pport + pidx;
@@ -931,7 +934,6 @@ static void qib_shutdown_device(struct qib_devdata *dd)
                qib_free_pportdata(ppd);
        }
 
-       qib_update_eeprom_log(dd);
 }
 
 /**
@@ -1026,8 +1028,7 @@ static void qib_verify_pioperf(struct qib_devdata *dd)
        addr = vmalloc(cnt);
        if (!addr) {
                qib_devinfo(dd->pcidev,
-                        "Couldn't get memory for checking PIO perf,"
-                        " skipping\n");
+                        "Couldn't get memory for checking PIO perf, skipping\n");
                goto done;
        }
 
@@ -1163,6 +1164,7 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)
 
        if (!qib_cpulist_count) {
                u32 count = num_online_cpus();
+
                qib_cpulist = kzalloc(BITS_TO_LONGS(count) *
                                      sizeof(long), GFP_KERNEL);
                if (qib_cpulist)
@@ -1179,7 +1181,7 @@ bail:
        if (!list_empty(&dd->list))
                list_del_init(&dd->list);
        ib_dealloc_device(&dd->verbs_dev.ibdev);
-       return ERR_PTR(ret);;
+       return ERR_PTR(ret);
 }
 
 /*
index f4918f2165ec72616d51a3bd9c461b33f6ed8548..086616d071b988e38cc813b09a9659d787195c62 100644 (file)
@@ -168,7 +168,6 @@ skip_ibchange:
        ppd->lastibcstat = ibcs;
        if (ev)
                signal_ib_event(ppd, ev);
-       return;
 }
 
 void qib_clear_symerror_on_linkup(unsigned long opaque)
index 3b9afccaaade824370f5c0ea0d6d6ceb519e6090..ad843c786e7212d0c89f90264bacb5cb6b8346a0 100644 (file)
@@ -122,10 +122,10 @@ void qib_free_lkey(struct qib_mregion *mr)
        if (!mr->lkey_published)
                goto out;
        if (lkey == 0)
-               rcu_assign_pointer(dev->dma_mr, NULL);
+               RCU_INIT_POINTER(dev->dma_mr, NULL);
        else {
                r = lkey >> (32 - ib_qib_lkey_table_size);
-               rcu_assign_pointer(rkt->table[r], NULL);
+               RCU_INIT_POINTER(rkt->table[r], NULL);
        }
        qib_put_mr(mr);
        mr->lkey_published = 0;
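
[Annotation] The rcu_assign_pointer() to RCU_INIT_POINTER() conversions here, and in the qib_qp.c hunks below, lean on the documented rule that no publish barrier is needed when the value being stored is NULL (there is nothing behind the pointer for a reader to observe half-initialised), and likewise when the pointed-to object is already visible to readers, as with the qp->next splice. A sketch of the two cases:

    /* Publishing a new object: readers must see its fields initialised first. */
    obj->ready = 1;
    rcu_assign_pointer(table[idx], obj);    /* includes the needed barrier */

    /* Tearing down: NULL has no contents to order against. */
    RCU_INIT_POINTER(table[idx], NULL);
    synchronize_rcu();                      /* wait out existing readers */
    kfree(obj);
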
index 636be117b57859e690fdb0704be6fe10b429511f..395f4046dba2054633ad41f4a0f67a4dbee57c63 100644 (file)
@@ -152,14 +152,14 @@ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl,
        data.trap_num = trap_num;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_257_258.lid1 = lid1;
        data.details.ntc_257_258.lid2 = lid2;
        data.details.ntc_257_258.key = cpu_to_be32(key);
        data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1);
        data.details.ntc_257_258.qp2 = cpu_to_be32(qp2);
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -176,7 +176,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
        data.trap_num = IB_NOTICE_TRAP_BAD_MKEY;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_256.lid = data.issuer_lid;
        data.details.ntc_256.method = smp->method;
        data.details.ntc_256.attr_id = smp->attr_id;
@@ -198,7 +198,7 @@ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp)
                       hop_cnt);
        }
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -214,11 +214,11 @@ void qib_cap_mask_chg(struct qib_ibport *ibp)
        data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_144.lid = data.issuer_lid;
        data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags);
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -234,11 +234,11 @@ void qib_sys_guid_chg(struct qib_ibport *ibp)
        data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_145.lid = data.issuer_lid;
        data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid;
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 /*
@@ -254,12 +254,12 @@ void qib_node_desc_chg(struct qib_ibport *ibp)
        data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG;
        data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid);
        data.toggle_count = 0;
-       memset(&data.details, 0, sizeof data.details);
+       memset(&data.details, 0, sizeof(data.details));
        data.details.ntc_144.lid = data.issuer_lid;
        data.details.ntc_144.local_changes = 1;
        data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG;
 
-       qib_send_trap(ibp, &data, sizeof data);
+       qib_send_trap(ibp, &data, sizeof(data));
 }
 
 static int subn_get_nodedescription(struct ib_smp *smp,
index 8b73a11d571c1671ba6678565c5e34754a3ab411..146cf29a2e1db19a8293f2ecbf3f8348ce1732c2 100644 (file)
@@ -134,7 +134,7 @@ struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev,
                                           void *obj) {
        struct qib_mmap_info *ip;
 
-       ip = kmalloc(sizeof *ip, GFP_KERNEL);
+       ip = kmalloc(sizeof(*ip), GFP_KERNEL);
        if (!ip)
                goto bail;
 
index a77fb4fb14e43c255e23a41b7c86877835a78ea3..c4473db46699b5f367a0fd1b9d9df92bb2723d50 100644 (file)
@@ -55,7 +55,7 @@ static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd,
 
        m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
        for (; i < m; i++) {
-               mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL);
+               mr->map[i] = kzalloc(sizeof(*mr->map[0]), GFP_KERNEL);
                if (!mr->map[i])
                        goto bail;
        }
@@ -104,7 +104,7 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc)
                goto bail;
        }
 
-       mr = kzalloc(sizeof *mr, GFP_KERNEL);
+       mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -143,7 +143,7 @@ static struct qib_mr *alloc_mr(int count, struct ib_pd *pd)
 
        /* Allocate struct plus pointers to first level page tables. */
        m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
-       mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL);
+       mr = kzalloc(sizeof(*mr) + m * sizeof(mr->mr.map[0]), GFP_KERNEL);
        if (!mr)
                goto bail;
 
@@ -347,7 +347,7 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len)
        if (size > PAGE_SIZE)
                return ERR_PTR(-EINVAL);
 
-       pl = kzalloc(sizeof *pl, GFP_KERNEL);
+       pl = kzalloc(sizeof(*pl), GFP_KERNEL);
        if (!pl)
                return ERR_PTR(-ENOMEM);
 
@@ -386,7 +386,7 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
 
        /* Allocate struct plus pointers to first level page tables. */
        m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ;
-       fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL);
+       fmr = kzalloc(sizeof(*fmr) + m * sizeof(fmr->mr.map[0]), GFP_KERNEL);
        if (!fmr)
                goto bail;
 
index 61a0046efb76ff9310b9a15f79dfe9dd11354c05..4758a3801ae8f916b6a96988b6c88ce5c344c588 100644 (file)
@@ -210,7 +210,7 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
        /* We can't pass qib_msix_entry array to qib_msix_setup
         * so use a dummy msix_entry array and copy the allocated
         * irq back to the qib_msix_entry array. */
-       msix_entry = kmalloc(nvec * sizeof(*msix_entry), GFP_KERNEL);
+       msix_entry = kcalloc(nvec, sizeof(*msix_entry), GFP_KERNEL);
        if (!msix_entry)
                goto do_intx;
 
@@ -234,8 +234,10 @@ free_msix_entry:
        kfree(msix_entry);
 
 do_intx:
-       qib_dev_err(dd, "pci_enable_msix_range %d vectors failed: %d, "
-                       "falling back to INTx\n", nvec, ret);
+       qib_dev_err(
+               dd,
+               "pci_enable_msix_range %d vectors failed: %d, falling back to INTx\n",
+               nvec, ret);
        *msixcnt = 0;
        qib_enable_intx(dd->pcidev);
 }
@@ -459,6 +461,7 @@ void qib_pcie_getcmd(struct qib_devdata *dd, u16 *cmd, u8 *iline, u8 *cline)
 void qib_pcie_reenable(struct qib_devdata *dd, u16 cmd, u8 iline, u8 cline)
 {
        int r;
+
        r = pci_write_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0,
                                   dd->pcibar0);
        if (r)
@@ -696,6 +699,7 @@ static void
 qib_pci_resume(struct pci_dev *pdev)
 {
        struct qib_devdata *dd = pci_get_drvdata(pdev);
+
        qib_devinfo(pdev, "QIB resume function called\n");
        pci_cleanup_aer_uncorrect_error_status(pdev);
        /*
index 6ddc0264aad2779ef327161c14a2ee9aef2e543f..4fa88ba2963e6ba21186ae5eb095ea531b741e97 100644 (file)
@@ -255,10 +255,10 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
 
        if (rcu_dereference_protected(ibp->qp0,
                        lockdep_is_held(&dev->qpt_lock)) == qp) {
-               rcu_assign_pointer(ibp->qp0, NULL);
+               RCU_INIT_POINTER(ibp->qp0, NULL);
        } else if (rcu_dereference_protected(ibp->qp1,
                        lockdep_is_held(&dev->qpt_lock)) == qp) {
-               rcu_assign_pointer(ibp->qp1, NULL);
+               RCU_INIT_POINTER(ibp->qp1, NULL);
        } else {
                struct qib_qp *q;
                struct qib_qp __rcu **qpp;
@@ -269,7 +269,7 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)
                                lockdep_is_held(&dev->qpt_lock))) != NULL;
                                qpp = &q->next)
                        if (q == qp) {
-                               rcu_assign_pointer(*qpp,
+                               RCU_INIT_POINTER(*qpp,
                                        rcu_dereference_protected(qp->next,
                                         lockdep_is_held(&dev->qpt_lock)));
                                removed = 1;
@@ -315,7 +315,7 @@ unsigned qib_free_all_qps(struct qib_devdata *dd)
        for (n = 0; n < dev->qp_table_size; n++) {
                qp = rcu_dereference_protected(dev->qp_table[n],
                        lockdep_is_held(&dev->qpt_lock));
-               rcu_assign_pointer(dev->qp_table[n], NULL);
+               RCU_INIT_POINTER(dev->qp_table[n], NULL);
 
                for (; qp; qp = rcu_dereference_protected(qp->next,
                                        lockdep_is_held(&dev->qpt_lock)))
index fa71b1e666c5414fbba2357fe531e75cabc7986c..5e27f76805e28af0c0e0acdc9864360f8cf09b41 100644 (file)
@@ -81,7 +81,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
         * Module could take up to 2 Msec to respond to MOD_SEL, and there
         * is no way to tell if it is ready, so we must wait.
         */
-       msleep(2);
+       msleep(20);
 
        /* Make sure TWSI bus is in sane state. */
        ret = qib_twsi_reset(dd);
@@ -99,6 +99,7 @@ static int qsfp_read(struct qib_pportdata *ppd, int addr, void *bp, int len)
        while (cnt < len) {
                unsigned in_page;
                int wlen = len - cnt;
+
                in_page = addr % QSFP_PAGESIZE;
                if ((in_page + wlen) > QSFP_PAGESIZE)
                        wlen = QSFP_PAGESIZE - in_page;
@@ -139,7 +140,7 @@ deselect:
        else if (pass)
                qib_dev_porterr(dd, ppd->port, "QSFP retries: %d\n", pass);
 
-       msleep(2);
+       msleep(20);
 
 bail:
        mutex_unlock(&dd->eep_lock);
@@ -189,7 +190,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
         * Module could take up to 2 Msec to respond to MOD_SEL,
         * and there is no way to tell if it is ready, so we must wait.
         */
-       msleep(2);
+       msleep(20);
 
        /* Make sure TWSI bus is in sane state. */
        ret = qib_twsi_reset(dd);
@@ -206,6 +207,7 @@ static int qib_qsfp_write(struct qib_pportdata *ppd, int addr, void *bp,
        while (cnt < len) {
                unsigned in_page;
                int wlen = len - cnt;
+
                in_page = addr % QSFP_PAGESIZE;
                if ((in_page + wlen) > QSFP_PAGESIZE)
                        wlen = QSFP_PAGESIZE - in_page;
@@ -234,7 +236,7 @@ deselect:
         * going away, and there is no way to tell if it is ready.
         * so we must wait.
         */
-       msleep(2);
+       msleep(20);
 
 bail:
        mutex_unlock(&dd->eep_lock);
@@ -296,6 +298,7 @@ int qib_refresh_qsfp_cache(struct qib_pportdata *ppd, struct qib_qsfp_cache *cp)
                 * set the page to zero, Even if it already appears to be zero.
                 */
                u8 poke = 0;
+
                ret = qib_qsfp_write(ppd, 127, &poke, 1);
                udelay(50);
                if (ret != 1) {
@@ -480,7 +483,6 @@ void qib_qsfp_init(struct qib_qsfp_data *qd,
        udelay(20); /* Generous RST dwell */
 
        dd->f_gpio_mod(dd, mask, mask, mask);
-       return;
 }
 
 void qib_qsfp_deinit(struct qib_qsfp_data *qd)
@@ -540,6 +542,7 @@ int qib_qsfp_dump(struct qib_pportdata *ppd, char *buf, int len)
 
        while (bidx < QSFP_DEFAULT_HDR_CNT) {
                int iidx;
+
                ret = qsfp_read(ppd, bidx, bin_buff, QSFP_DUMP_CHUNK);
                if (ret < 0)
                        goto bail;
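
[Annotation] The msleep(2) to msleep(20) changes in this file are consistent with the kernel's timer documentation, which notes that msleep() with a 1-20 ms argument usually oversleeps to roughly 20 ms anyway because of jiffy granularity; asking for 20 ms states the delay the QSFP wait actually gets. usleep_range() is the tool when a tighter bound matters. Both options, as a sketch:

    msleep(20);                     /* coarse, timer-based; may sleep somewhat longer */

    usleep_range(2000, 20000);      /* hrtimer-based: at least 2 ms, at most ~20 ms */
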
index 2f2501890c4ea2b26a9ebd7a82755688605443e3..4544d6f88ad77c7f7c69fd6e6d4a138188f493b3 100644 (file)
@@ -1017,7 +1017,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr)
                /* Post a send completion queue entry if requested. */
                if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
                    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-                       memset(&wc, 0, sizeof wc);
+                       memset(&wc, 0, sizeof(wc));
                        wc.wr_id = wqe->wr.wr_id;
                        wc.status = IB_WC_SUCCESS;
                        wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
@@ -1073,7 +1073,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp,
                /* Post a send completion queue entry if requested. */
                if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) ||
                    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
-                       memset(&wc, 0, sizeof wc);
+                       memset(&wc, 0, sizeof(wc));
                        wc.wr_id = wqe->wr.wr_id;
                        wc.status = IB_WC_SUCCESS;
                        wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
index 4c07a8b34ffe27e2bfa89a19883fa25ef6aa64e7..f42bd0f47577a4557f47cac58de4a16b678b923d 100644 (file)
@@ -247,8 +247,8 @@ static __be64 get_sguid(struct qib_ibport *ibp, unsigned index)
                struct qib_pportdata *ppd = ppd_from_ibp(ibp);
 
                return ppd->guid;
-       } else
-               return ibp->guids[index - 1];
+       }
+       return ibp->guids[index - 1];
 }
 
 static int gid_ok(union ib_gid *gid, __be64 gid_prefix, __be64 id)
@@ -420,7 +420,7 @@ again:
                goto serr;
        }
 
-       memset(&wc, 0, sizeof wc);
+       memset(&wc, 0, sizeof(wc));
        send_status = IB_WC_SUCCESS;
 
        release = 1;
@@ -792,7 +792,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe,
            status != IB_WC_SUCCESS) {
                struct ib_wc wc;
 
-               memset(&wc, 0, sizeof wc);
+               memset(&wc, 0, sizeof(wc));
                wc.wr_id = wqe->wr.wr_id;
                wc.status = status;
                wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode];
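
Several of the qib hunks are pure style cleanups of the kind checkpatch flags: the operand of sizeof gains parentheses and an else that follows a return is dropped. A small sketch of the preferred forms (the surrounding declarations are illustrative only):

	struct ib_wc wc;

	memset(&wc, 0, sizeof(wc));	/* sizeof(x), not sizeof x */

	if (index == 0)
		return ppd->guid;
	return ibp->guids[index - 1];	/* no else needed after a return */
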
index 911205d3d5a0bf255fae65126740ab47eaa4a8ef..c72775f2721226868604b3770b1dbe23b7456dc0 100644 (file)
@@ -259,6 +259,7 @@ static int qib_ibsd_reset(struct qib_devdata *dd, int assert_rst)
                 * it again during startup.
                 */
                u64 val;
+
                rst_val &= ~(1ULL);
                qib_write_kreg(dd, kr_hwerrmask,
                               dd->cspec->hwerrmask &
@@ -590,6 +591,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
                 * Both should be clear
                 */
                u64 newval = 0;
+
                qib_write_kreg(dd, acc, newval);
                /* First read after write is not trustworthy */
                pollval = qib_read_kreg32(dd, acc);
@@ -601,6 +603,7 @@ static int epb_access(struct qib_devdata *dd, int sdnum, int claim)
                /* Need to claim */
                u64 pollval;
                u64 newval = EPB_ACC_REQ | oct_sel;
+
                qib_write_kreg(dd, acc, newval);
                /* First read after write is not trustworthy */
                pollval = qib_read_kreg32(dd, acc);
@@ -812,6 +815,7 @@ static int qib_sd7220_ram_xfer(struct qib_devdata *dd, int sdnum, u32 loc,
                        if (!sofar) {
                                /* Only set address at start of chunk */
                                int addrbyte = (addr + sofar) >> 8;
+
                                transval = csbit | EPB_MADDRH | addrbyte;
                                tries = epb_trans(dd, trans, transval,
                                                  &transval);
@@ -922,7 +926,7 @@ qib_sd7220_ib_vfy(struct qib_devdata *dd, const struct firmware *fw)
  * IRQ not set up at this point in init, so we poll.
  */
 #define IB_SERDES_TRIM_DONE (1ULL << 11)
-#define TRIM_TMO (30)
+#define TRIM_TMO (15)
 
 static int qib_sd_trimdone_poll(struct qib_devdata *dd)
 {
@@ -940,7 +944,7 @@ static int qib_sd_trimdone_poll(struct qib_devdata *dd)
                        ret = 1;
                        break;
                }
-               msleep(10);
+               msleep(20);
        }
        if (trim_tmo >= TRIM_TMO) {
                qib_dev_err(dd, "No TRIMDONE in %d tries\n", trim_tmo);
@@ -1071,6 +1075,7 @@ static int qib_sd_setvals(struct qib_devdata *dd)
                dds_reg_map >>= 4;
                for (midx = 0; midx < DDS_ROWS; ++midx) {
                        u64 __iomem *daddr = taddr + ((midx << 4) + idx);
+
                        data = dds_init_vals[midx].reg_vals[idx];
                        writeq(data, daddr);
                        mmiowb();
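
The TRIM_TMO halving pairs with the msleep() change inside qib_sd_trimdone_poll(): 30 polls of 10 ms and 15 polls of 20 ms nominally give the same ~300 ms window for IB_SERDES_TRIM_DONE, so the overall timeout is preserved while each individual sleep now matches msleep() granularity. A sketch of the resulting loop shape (read_trimdone() is a hypothetical stand-in for the kreg read):

	int tries;

	for (tries = 0; tries < 15; tries++) {
		if (read_trimdone())	/* hypothetical helper */
			break;
		msleep(20);		/* 15 x 20 ms ~= same 300 ms budget */
	}
	if (tries >= 15)
		pr_err("No TRIMDONE\n");
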
index 3c8e4e3caca6240175bbddb35fb304107179920e..81f56cdff2bc280c7a64f816a215b10b703d0ec4 100644 (file)
@@ -586,8 +586,8 @@ static ssize_t show_serial(struct device *device,
                container_of(device, struct qib_ibdev, ibdev.dev);
        struct qib_devdata *dd = dd_from_dev(dev);
 
-       buf[sizeof dd->serial] = '\0';
-       memcpy(buf, dd->serial, sizeof dd->serial);
+       buf[sizeof(dd->serial)] = '\0';
+       memcpy(buf, dd->serial, sizeof(dd->serial));
        strcat(buf, "\n");
        return strlen(buf);
 }
@@ -611,28 +611,6 @@ bail:
        return ret < 0 ? ret : count;
 }
 
-static ssize_t show_logged_errs(struct device *device,
-                               struct device_attribute *attr, char *buf)
-{
-       struct qib_ibdev *dev =
-               container_of(device, struct qib_ibdev, ibdev.dev);
-       struct qib_devdata *dd = dd_from_dev(dev);
-       int idx, count;
-
-       /* force consistency with actual EEPROM */
-       if (qib_update_eeprom_log(dd) != 0)
-               return -ENXIO;
-
-       count = 0;
-       for (idx = 0; idx < QIB_EEP_LOG_CNT; ++idx) {
-               count += scnprintf(buf + count, PAGE_SIZE - count, "%d%c",
-                                  dd->eep_st_errs[idx],
-                                  idx == (QIB_EEP_LOG_CNT - 1) ? '\n' : ' ');
-       }
-
-       return count;
-}
-
 /*
  * Dump tempsense regs. in decimal, to ease shell-scripts.
  */
@@ -679,7 +657,6 @@ static DEVICE_ATTR(nctxts, S_IRUGO, show_nctxts, NULL);
 static DEVICE_ATTR(nfreectxts, S_IRUGO, show_nfreectxts, NULL);
 static DEVICE_ATTR(serial, S_IRUGO, show_serial, NULL);
 static DEVICE_ATTR(boardversion, S_IRUGO, show_boardversion, NULL);
-static DEVICE_ATTR(logged_errors, S_IRUGO, show_logged_errs, NULL);
 static DEVICE_ATTR(tempsense, S_IRUGO, show_tempsense, NULL);
 static DEVICE_ATTR(localbus_info, S_IRUGO, show_localbus_info, NULL);
 static DEVICE_ATTR(chip_reset, S_IWUSR, NULL, store_chip_reset);
@@ -693,7 +670,6 @@ static struct device_attribute *qib_attributes[] = {
        &dev_attr_nfreectxts,
        &dev_attr_serial,
        &dev_attr_boardversion,
-       &dev_attr_logged_errors,
        &dev_attr_tempsense,
        &dev_attr_localbus_info,
        &dev_attr_chip_reset,
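
Dropping the logged_errors sysfs file removes all three pieces that define a device attribute: the show handler, the DEVICE_ATTR() declaration and the pointer in the attribute array. A minimal sketch of that trio (the attribute name here is purely illustrative):

	static ssize_t show_foo(struct device *device,
				struct device_attribute *attr, char *buf)
	{
		return scnprintf(buf, PAGE_SIZE, "%d\n", 42);
	}
	static DEVICE_ATTR(foo, S_IRUGO, show_foo, NULL);

	static struct device_attribute *attrs[] = {
		&dev_attr_foo,	/* delete this entry when retiring the file */
	};
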
index 647f7beb1b0a1669a841c8180a9d6f4f1722c4c9..f5698664419b430ac932fb396f7a35c11ca80816 100644 (file)
@@ -105,6 +105,7 @@ static void scl_out(struct qib_devdata *dd, u8 bit)
                udelay(2);
        else {
                int rise_usec;
+
                for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
                        if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
                                break;
@@ -326,6 +327,7 @@ int qib_twsi_reset(struct qib_devdata *dd)
 static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
 {
        int ret = 1;
+
        if (flags & QIB_TWSI_START)
                start_seq(dd);
 
@@ -435,8 +437,7 @@ int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
        int sub_len;
        const u8 *bp = buffer;
        int max_wait_time, i;
-       int ret;
-       ret = 1;
+       int ret = 1;
 
        while (len > 0) {
                if (dev == QIB_TWSI_NO_DEV) {
index 31d3561400a49f6056eb6be5c082b05cc0edb0e4..eface3b3dacf6a3d49c6d67b6702de7d38abc8ab 100644 (file)
@@ -180,6 +180,7 @@ void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
 
        for (i = 0; i < cnt; i++) {
                int which;
+
                if (!test_bit(i, mask))
                        continue;
                /*
index aaf7039f8ed2112041174d1b320a3d8205348c08..26243b722b5e979c1324471b6e17871d3ef22540 100644 (file)
@@ -127,7 +127,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe)
         * present on the wire.
         */
        length = swqe->length;
-       memset(&wc, 0, sizeof wc);
+       memset(&wc, 0, sizeof(wc));
        wc.byte_len = length + sizeof(struct ib_grh);
 
        if (swqe->wr.opcode == IB_WR_SEND_WITH_IMM) {
index d2806cae234c254ef7e71ee58e03df5d2c1efdc9..3e0677c512768a7bd79a612fae251f0af70db512 100644 (file)
@@ -50,7 +50,7 @@
 /* expected size of headers (for dma_pool) */
 #define QIB_USER_SDMA_EXP_HEADER_LENGTH 64
 /* attempt to drain the queue for 5secs */
-#define QIB_USER_SDMA_DRAIN_TIMEOUT 500
+#define QIB_USER_SDMA_DRAIN_TIMEOUT 250
 
 /*
  * track how many times a process open this driver.
@@ -226,6 +226,7 @@ qib_user_sdma_queue_create(struct device *dev, int unit, int ctxt, int sctxt)
                sdma_rb_node->refcount++;
        } else {
                int ret;
+
                sdma_rb_node = kmalloc(sizeof(
                        struct qib_user_sdma_rb_node), GFP_KERNEL);
                if (!sdma_rb_node)
@@ -936,6 +937,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd,
 
                        if (tiddma) {
                                char *tidsm = (char *)pkt + pktsize;
+
                                cfur = copy_from_user(tidsm,
                                        iov[idx].iov_base, tidsmsize);
                                if (cfur) {
@@ -1142,7 +1144,7 @@ void qib_user_sdma_queue_drain(struct qib_pportdata *ppd,
                qib_user_sdma_hwqueue_clean(ppd);
                qib_user_sdma_queue_clean(ppd, pq);
                mutex_unlock(&pq->lock);
-               msleep(10);
+               msleep(20);
        }
 
        if (pq->num_pending || pq->num_sending) {
@@ -1316,8 +1318,6 @@ retry:
 
        if (nfree && !list_empty(pktlist))
                goto retry;
-
-       return;
 }
 
 /* pq->lock must be held, get packets on the wire... */
index 9bcfbd8429804e237b23555a54bbd2b1d0fc5a27..4a3599890ea5f114655a34e472bee9197d73bd2c 100644 (file)
@@ -1342,6 +1342,7 @@ static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
 done:
        if (dd->flags & QIB_USE_SPCL_TRIG) {
                u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;
+
                qib_flush_wc();
                __raw_writel(0xaebecede, piobuf_orig + spcl_off);
        }
@@ -1744,7 +1745,7 @@ static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
         * we allow allocations of more than we report for this value.
         */
 
-       pd = kmalloc(sizeof *pd, GFP_KERNEL);
+       pd = kmalloc(sizeof(*pd), GFP_KERNEL);
        if (!pd) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -1829,7 +1830,7 @@ static struct ib_ah *qib_create_ah(struct ib_pd *pd,
                goto bail;
        }
 
-       ah = kmalloc(sizeof *ah, GFP_ATOMIC);
+       ah = kmalloc(sizeof(*ah), GFP_ATOMIC);
        if (!ah) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -1862,7 +1863,7 @@ struct ib_ah *qib_create_qp0_ah(struct qib_ibport *ibp, u16 dlid)
        struct ib_ah *ah = ERR_PTR(-EINVAL);
        struct qib_qp *qp0;
 
-       memset(&attr, 0, sizeof attr);
+       memset(&attr, 0, sizeof(attr));
        attr.dlid = dlid;
        attr.port_num = ppd_from_ibp(ibp)->port;
        rcu_read_lock();
@@ -1977,7 +1978,7 @@ static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
        struct qib_ucontext *context;
        struct ib_ucontext *ret;
 
-       context = kmalloc(sizeof *context, GFP_KERNEL);
+       context = kmalloc(sizeof(*context), GFP_KERNEL);
        if (!context) {
                ret = ERR_PTR(-ENOMEM);
                goto bail;
@@ -2054,7 +2055,9 @@ int qib_register_ib_device(struct qib_devdata *dd)
 
        dev->qp_table_size = ib_qib_qp_table_size;
        get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd));
-       dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table,
+       dev->qp_table = kmalloc_array(
+                               dev->qp_table_size,
+                               sizeof(*dev->qp_table),
                                GFP_KERNEL);
        if (!dev->qp_table) {
                ret = -ENOMEM;
@@ -2122,7 +2125,7 @@ int qib_register_ib_device(struct qib_devdata *dd)
        for (i = 0; i < ppd->sdma_descq_cnt; i++) {
                struct qib_verbs_txreq *tx;
 
-               tx = kzalloc(sizeof *tx, GFP_KERNEL);
+               tx = kzalloc(sizeof(*tx), GFP_KERNEL);
                if (!tx) {
                        ret = -ENOMEM;
                        goto err_tx;
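
The qp_table allocation moves from an open-coded multiply to kmalloc_array(), which returns NULL instead of silently wrapping when the element count times the element size overflows. A short sketch of the two forms (tbl and n are placeholders):

	/* old style: n * sizeof(*tbl) can overflow unnoticed */
	tbl = kmalloc(n * sizeof(*tbl), GFP_KERNEL);

	/* preferred: the multiplication is overflow-checked */
	tbl = kmalloc_array(n, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;
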
index dabb697b1c2a6025ae72927740582a1cb277f796..f8ea069a3eafca4078ab70848c3804867609143c 100644 (file)
@@ -43,7 +43,7 @@ static struct qib_mcast_qp *qib_mcast_qp_alloc(struct qib_qp *qp)
 {
        struct qib_mcast_qp *mqp;
 
-       mqp = kmalloc(sizeof *mqp, GFP_KERNEL);
+       mqp = kmalloc(sizeof(*mqp), GFP_KERNEL);
        if (!mqp)
                goto bail;
 
@@ -75,7 +75,7 @@ static struct qib_mcast *qib_mcast_alloc(union ib_gid *mgid)
 {
        struct qib_mcast *mcast;
 
-       mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
+       mcast = kmalloc(sizeof(*mcast), GFP_KERNEL);
        if (!mcast)
                goto bail;
 
index 1d7281c5a02eaf633fed9d4b71d4bec2b9433915..81b225f2300aed34eab9b88077bd4c4097dceae2 100644 (file)
@@ -72,6 +72,7 @@ int qib_enable_wc(struct qib_devdata *dd)
        if (dd->piobcnt2k && dd->piobcnt4k) {
                /* 2 sizes for chip */
                unsigned long pio2kbase, pio4kbase;
+
                pio2kbase = dd->piobufbase & 0xffffffffUL;
                pio4kbase = (dd->piobufbase >> 32) & 0xffffffffUL;
                if (pio2kbase < pio4kbase) {
@@ -91,7 +92,7 @@ int qib_enable_wc(struct qib_devdata *dd)
        }
 
        for (bits = 0; !(piolen & (1ULL << bits)); bits++)
-               /* do nothing */ ;
+               ; /* do nothing */
 
        if (piolen != (1ULL << bits)) {
                piolen >>= bits;
@@ -100,8 +101,8 @@ int qib_enable_wc(struct qib_devdata *dd)
                piolen = 1ULL << (bits + 1);
        }
        if (pioaddr & (piolen - 1)) {
-               u64 atmp;
-               atmp = pioaddr & ~(piolen - 1);
+               u64 atmp = pioaddr & ~(piolen - 1);
+
                if (atmp < addr || (atmp + piolen) > (addr + len)) {
                        qib_dev_err(dd,
                                "No way to align address/size (%llx/%llx), no WC mtrr\n",
index 5ce26817e7e1d9b8d43126518188b3769e0c7f44..b47aea1094b2d9f7e434cf8a442f4b83b1a28f3c 100644 (file)
@@ -654,7 +654,9 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
                           enum dma_data_direction dma_dir);
 
 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
-                             struct iser_data_buf *data);
+                             struct iser_data_buf *data,
+                             enum dma_data_direction dir);
+
 int  iser_initialize_task_headers(struct iscsi_task *task,
                        struct iser_tx_desc *tx_desc);
 int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
index 3821633f1065b15a9fcc1b1c36a74eda619aeae3..20e859a6f1a63b2fede51dcb2b02b9b6f638a36e 100644 (file)
@@ -320,9 +320,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
 
-       if (!iser_conn->rx_descs)
-               goto free_login_buf;
-
        if (device->iser_free_rdma_reg_res)
                device->iser_free_rdma_reg_res(ib_conn);
 
@@ -334,7 +331,6 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
        /* make sure we never redo any unmapping */
        iser_conn->rx_descs = NULL;
 
-free_login_buf:
        iser_free_login_buf(iser_conn);
 }
 
@@ -714,19 +710,23 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
                device->iser_unreg_rdma_mem(iser_task, ISER_DIR_IN);
                if (is_rdma_data_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->data[ISER_DIR_IN]);
+                                                &iser_task->data[ISER_DIR_IN],
+                                                DMA_FROM_DEVICE);
                if (prot_count && is_rdma_prot_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->prot[ISER_DIR_IN]);
+                                                &iser_task->prot[ISER_DIR_IN],
+                                                DMA_FROM_DEVICE);
        }
 
        if (iser_task->dir[ISER_DIR_OUT]) {
                device->iser_unreg_rdma_mem(iser_task, ISER_DIR_OUT);
                if (is_rdma_data_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->data[ISER_DIR_OUT]);
+                                                &iser_task->data[ISER_DIR_OUT],
+                                                DMA_TO_DEVICE);
                if (prot_count && is_rdma_prot_aligned)
                        iser_dma_unmap_task_data(iser_task,
-                                                &iser_task->prot[ISER_DIR_OUT]);
+                                                &iser_task->prot[ISER_DIR_OUT],
+                                                DMA_TO_DEVICE);
        }
 }
index abce9339333f0a8551a2e52600d2409edd39461c..341040bf09849d41e4e01c613e0d0e01683fceda 100644 (file)
@@ -332,12 +332,13 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 }
 
 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
-                             struct iser_data_buf *data)
+                             struct iser_data_buf *data,
+                             enum dma_data_direction dir)
 {
        struct ib_device *dev;
 
        dev = iser_task->iser_conn->ib_conn.device->ib_device;
-       ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
+       ib_dma_unmap_sg(dev, data->buf, data->size, dir);
 }
 
 static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
@@ -357,7 +358,9 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
                iser_data_buf_dump(mem, ibdev);
 
        /* unmap the command data before accessing it */
-       iser_dma_unmap_task_data(iser_task, mem);
+       iser_dma_unmap_task_data(iser_task, mem,
+                                (cmd_dir == ISER_DIR_OUT) ?
+                                DMA_TO_DEVICE : DMA_FROM_DEVICE);
 
        /* allocate copy buf, if we are writing, copy the */
        /* unaligned scatterlist, dma map the copy        */
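
Passing an explicit enum dma_data_direction into iser_dma_unmap_task_data() lets the unmap mirror the direction used at map time; the old code always unmapped with DMA_FROM_DEVICE, which did not match buffers mapped DMA_TO_DEVICE for writes. A minimal sketch of the matched pair, assuming the usual ib_dma_map_sg()/ib_dma_unmap_sg() helpers (variable names illustrative):

	enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

	nents = ib_dma_map_sg(ib_dev, sg, sg_cnt, dir);
	if (!nents)
		return -EINVAL;
	/* ... hand the mapped SG list to the HCA ... */
	ib_dma_unmap_sg(ib_dev, sg, sg_cnt, dir);	/* same direction as the map */
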
index 695a2704bd4380acafbc04c4ae7f0ecd96bc95a0..4065abe28829f78356c5ee7cd76ffa7b7330b3d5 100644 (file)
@@ -600,16 +600,16 @@ void iser_release_work(struct work_struct *work)
 /**
  * iser_free_ib_conn_res - release IB related resources
  * @iser_conn: iser connection struct
- * @destroy_device: indicator if we need to try to release
- *     the iser device (only iscsi shutdown and DEVICE_REMOVAL
- *     will use this.
+ * @destroy: indicator if we need to try to release the
+ *     iser device and memory regions pool (only iscsi
+ *     shutdown and DEVICE_REMOVAL will use this).
  *
  * This routine is called with the iser state mutex held
  * so the cm_id removal is out of here. It is safe to
  * be invoked multiple times.
  */
 static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
-                                 bool destroy_device)
+                                 bool destroy)
 {
        struct ib_conn *ib_conn = &iser_conn->ib_conn;
        struct iser_device *device = ib_conn->device;
@@ -617,17 +617,20 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn,
        iser_info("freeing conn %p cma_id %p qp %p\n",
                  iser_conn, ib_conn->cma_id, ib_conn->qp);
 
-       iser_free_rx_descriptors(iser_conn);
-
        if (ib_conn->qp != NULL) {
                ib_conn->comp->active_qps--;
                rdma_destroy_qp(ib_conn->cma_id);
                ib_conn->qp = NULL;
        }
 
-       if (destroy_device && device != NULL) {
-               iser_device_try_release(device);
-               ib_conn->device = NULL;
+       if (destroy) {
+               if (iser_conn->rx_descs)
+                       iser_free_rx_descriptors(iser_conn);
+
+               if (device != NULL) {
+                       iser_device_try_release(device);
+                       ib_conn->device = NULL;
+               }
        }
 }
 
@@ -643,9 +646,11 @@ void iser_conn_release(struct iser_conn *iser_conn)
        mutex_unlock(&ig.connlist_mutex);
 
        mutex_lock(&iser_conn->state_mutex);
+       /* In case we end up here without ep_disconnect being invoked. */
        if (iser_conn->state != ISER_CONN_DOWN) {
                iser_warn("iser conn %p state %d, expected state down.\n",
                          iser_conn, iser_conn->state);
+               iscsi_destroy_endpoint(iser_conn->ep);
                iser_conn->state = ISER_CONN_DOWN;
        }
        /*
@@ -840,7 +845,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
 }
 
 static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
-                                bool destroy_device)
+                                bool destroy)
 {
        struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context;
 
@@ -850,7 +855,7 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id,
         * and flush errors.
         */
        iser_disconnected_handler(cma_id);
-       iser_free_ib_conn_res(iser_conn, destroy_device);
+       iser_free_ib_conn_res(iser_conn, destroy);
        complete(&iser_conn->ib_completion);
 };
 
index dafb3c531f96f7ae61e70ff9f37d9324bee687ff..075b19cc78e89d11d73ba19bcffcd68c18323907 100644 (file)
@@ -38,7 +38,7 @@
 #define ISER_MAX_CQ_LEN                (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
                                 ISERT_MAX_CONN)
 
-int isert_debug_level = 0;
+static int isert_debug_level;
 module_param_named(debug_level, isert_debug_level, int, 0644);
 MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
 
@@ -949,7 +949,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
                isert_err("ib_post_recv() failed with ret: %d\n", ret);
                isert_conn->post_recv_buf_count -= count;
        } else {
-               isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
+               isert_dbg("Posted %d RX buffers\n", count);
                isert_conn->conn_rx_desc_head = rx_head;
        }
        return ret;
@@ -1351,17 +1351,19 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
        struct iscsi_conn *conn = isert_conn->conn;
        u32 payload_length = ntoh24(hdr->dlength);
        int rc;
-       unsigned char *text_in;
+       unsigned char *text_in = NULL;
 
        rc = iscsit_setup_text_cmd(conn, cmd, hdr);
        if (rc < 0)
                return rc;
 
-       text_in = kzalloc(payload_length, GFP_KERNEL);
-       if (!text_in) {
-               isert_err("Unable to allocate text_in of payload_length: %u\n",
-                         payload_length);
-               return -ENOMEM;
+       if (payload_length) {
+               text_in = kzalloc(payload_length, GFP_KERNEL);
+               if (!text_in) {
+                       isert_err("Unable to allocate text_in of payload_length: %u\n",
+                                 payload_length);
+                       return -ENOMEM;
+               }
        }
        cmd->text_in_ptr = text_in;
 
@@ -1434,9 +1436,15 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
                ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
                break;
        case ISCSI_OP_TEXT:
-               cmd = isert_allocate_cmd(conn);
-               if (!cmd)
-                       break;
+               if (be32_to_cpu(hdr->ttt) != 0xFFFFFFFF) {
+                       cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+                       if (!cmd)
+                               break;
+               } else {
+                       cmd = isert_allocate_cmd(conn);
+                       if (!cmd)
+                               break;
+               }
 
                isert_cmd = iscsit_priv_cmd(cmd);
                ret = isert_handle_text_cmd(isert_conn, isert_cmd, cmd,
@@ -1658,6 +1666,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
        struct isert_conn *isert_conn = isert_cmd->conn;
        struct iscsi_conn *conn = isert_conn->conn;
        struct isert_device *device = isert_conn->conn_device;
+       struct iscsi_text_rsp *hdr;
 
        isert_dbg("Cmd %p\n", isert_cmd);
 
@@ -1698,6 +1707,11 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
        case ISCSI_OP_REJECT:
        case ISCSI_OP_NOOP_OUT:
        case ISCSI_OP_TEXT:
+               hdr = (struct iscsi_text_rsp *)&isert_cmd->tx_desc.iscsi_header;
+               /* If the continue bit is on, keep the command alive */
+               if (hdr->flags & ISCSI_FLAG_TEXT_CONTINUE)
+                       break;
+
                spin_lock_bh(&conn->cmd_lock);
                if (!list_empty(&cmd->i_conn_node))
                        list_del_init(&cmd->i_conn_node);
@@ -1709,8 +1723,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
                 * associated cmd->se_cmd needs to be released.
                 */
                if (cmd->se_cmd.se_tfo != NULL) {
-                       isert_dbg("Calling transport_generic_free_cmd from"
-                                " isert_put_cmd for 0x%02x\n",
+                       isert_dbg("Calling transport_generic_free_cmd for 0x%02x\n",
                                 cmd->iscsi_opcode);
                        transport_generic_free_cmd(&cmd->se_cmd, 0);
                        break;
@@ -2275,7 +2288,7 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        }
        isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-       isert_dbg("conn %p Text Reject\n", isert_conn);
+       isert_dbg("conn %p Text Response\n", isert_conn);
 
        return isert_post_response(isert_conn, isert_cmd);
 }
@@ -3136,7 +3149,7 @@ accept_wait:
        spin_lock_bh(&np->np_thread_lock);
        if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
-               isert_dbg("np_thread_state %d for isert_accept_np\n",
+               isert_dbg("np_thread_state %d\n",
                         np->np_thread_state);
                /**
                 * No point in stalling here when np_thread
@@ -3320,7 +3333,8 @@ static int __init isert_init(void)
 {
        int ret;
 
-       isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
+       isert_comp_wq = alloc_workqueue("isert_comp_wq",
+                                       WQ_UNBOUND | WQ_HIGHPRI, 0);
        if (!isert_comp_wq) {
                isert_err("Unable to allocate isert_comp_wq\n");
                ret = -ENOMEM;
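
Two isert changes above are easy to miss: text commands with an empty payload no longer allocate a buffer (text_in simply stays NULL when dlength is zero), and the completion workqueue is created WQ_UNBOUND | WQ_HIGHPRI so completion work can run on any CPU from a high-priority worker pool rather than being pinned to the submitting CPU. A small sketch of the guarded allocation pattern (buf and len are placeholders):

	char *buf = NULL;

	if (len) {		/* nothing to allocate for a zero-length payload */
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	}
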
index eb694ddad79fe069b6d2f796004a5e8acc77e833..6e0a477681e90b0efe53330de1b9118000bcc85a 100644 (file)
@@ -3518,7 +3518,7 @@ static void srpt_close_session(struct se_session *se_sess)
        DECLARE_COMPLETION_ONSTACK(release_done);
        struct srpt_rdma_ch *ch;
        struct srpt_device *sdev;
-       int res;
+       unsigned long res;
 
        ch = se_sess->fabric_sess_ptr;
        WARN_ON(ch->sess != se_sess);
@@ -3533,7 +3533,7 @@ static void srpt_close_session(struct se_session *se_sess)
        spin_unlock_irq(&sdev->spinlock);
 
        res = wait_for_completion_timeout(&release_done, 60 * HZ);
-       WARN_ON(res <= 0);
+       WARN_ON(res == 0);
 }
 
 /**
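
The srpt_close_session() change is a type fix: wait_for_completion_timeout() returns an unsigned long that is 0 on timeout and the remaining jiffies otherwise, so the value is never negative; keeping it in a signed int and testing res <= 0 is misleading, and the unsigned long / == 0 form matches the API. A minimal sketch of the corrected usage:

	unsigned long left;

	left = wait_for_completion_timeout(&release_done, 60 * HZ);
	WARN_ON(left == 0);	/* 0 means the 60 s wait timed out */
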
index b78425765d3eb12ccd02fd1c3dc644a1956110a1..d09cefa379316a302df754394e342cc6fe4b088c 100644 (file)
@@ -535,8 +535,7 @@ static int adi_connect(struct gameport *gameport, struct gameport_driver *drv)
                }
        }
  fail2:        for (i = 0; i < 2; i++)
-               if (port->adi[i].dev)
-                       input_free_device(port->adi[i].dev);
+               input_free_device(port->adi[i].dev);
        gameport_close(gameport);
  fail1:        gameport_set_drvdata(gameport, NULL);
        kfree(port);
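
The adi.c cleanup leans on the fact that input_free_device() accepts a NULL pointer and does nothing with it, much like kfree(NULL), so the explicit NULL check in the unwind loop was redundant.
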
index a89488aa1aa4d0ea0fefcd796646f04dbe851c22..fcef5d1365e2a3034e3a9544f7e403a0680a5058 100644 (file)
@@ -345,13 +345,11 @@ static int pxa27x_keypad_build_keycode(struct pxa27x_keypad *keypad)
 {
        const struct pxa27x_keypad_platform_data *pdata = keypad->pdata;
        struct input_dev *input_dev = keypad->input_dev;
-       const struct matrix_keymap_data *keymap_data =
-                               pdata ? pdata->matrix_keymap_data : NULL;
        unsigned short keycode;
        int i;
        int error;
 
-       error = matrix_keypad_build_keymap(keymap_data, NULL,
+       error = matrix_keypad_build_keymap(pdata->matrix_keymap_data, NULL,
                                           pdata->matrix_key_rows,
                                           pdata->matrix_key_cols,
                                           keypad->keycodes, input_dev);
index 3f4351579372ebb87aafe5af7df505ac2f485361..a0fc18fdfc0c62263c21537a2f8105a3820846e3 100644 (file)
@@ -7,29 +7,37 @@
 
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/irq.h>
 #include <linux/pm.h>
 #include <linux/platform_device.h>
 #include <linux/input.h>
 #include <linux/slab.h>
+#include <linux/platform_data/bfin_rotary.h>
 
 #include <asm/portmux.h>
-#include <asm/bfin_rotary.h>
 
-static const u16 per_cnt[] = {
-       P_CNT_CUD,
-       P_CNT_CDG,
-       P_CNT_CZM,
-       0
-};
+#define CNT_CONFIG_OFF         0       /* CNT Config Offset */
+#define CNT_IMASK_OFF          4       /* CNT Interrupt Mask Offset */
+#define CNT_STATUS_OFF         8       /* CNT Status Offset */
+#define CNT_COMMAND_OFF                12      /* CNT Command Offset */
+#define CNT_DEBOUNCE_OFF       16      /* CNT Debounce Offset */
+#define CNT_COUNTER_OFF                20      /* CNT Counter Offset */
+#define CNT_MAX_OFF            24      /* CNT Maximum Count Offset */
+#define CNT_MIN_OFF            28      /* CNT Minimum Count Offset */
 
 struct bfin_rot {
        struct input_dev *input;
+       void __iomem *base;
        int irq;
        unsigned int up_key;
        unsigned int down_key;
        unsigned int button_key;
        unsigned int rel_code;
+
+       unsigned short mode;
+       unsigned short debounce;
+
        unsigned short cnt_config;
        unsigned short cnt_imask;
        unsigned short cnt_debounce;
@@ -59,18 +67,17 @@ static void report_rotary_event(struct bfin_rot *rotary, int delta)
 
 static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
 {
-       struct platform_device *pdev = dev_id;
-       struct bfin_rot *rotary = platform_get_drvdata(pdev);
+       struct bfin_rot *rotary = dev_id;
        int delta;
 
-       switch (bfin_read_CNT_STATUS()) {
+       switch (readw(rotary->base + CNT_STATUS_OFF)) {
 
        case ICII:
                break;
 
        case UCII:
        case DCII:
-               delta = bfin_read_CNT_COUNTER();
+               delta = readl(rotary->base + CNT_COUNTER_OFF);
                if (delta)
                        report_rotary_event(rotary, delta);
                break;
@@ -83,16 +90,52 @@ static irqreturn_t bfin_rotary_isr(int irq, void *dev_id)
                break;
        }
 
-       bfin_write_CNT_COMMAND(W1LCNT_ZERO);    /* Clear COUNTER */
-       bfin_write_CNT_STATUS(-1);      /* Clear STATUS */
+       writew(W1LCNT_ZERO, rotary->base + CNT_COMMAND_OFF); /* Clear COUNTER */
+       writew(-1, rotary->base + CNT_STATUS_OFF); /* Clear STATUS */
 
        return IRQ_HANDLED;
 }
 
+static int bfin_rotary_open(struct input_dev *input)
+{
+       struct bfin_rot *rotary = input_get_drvdata(input);
+       unsigned short val;
+
+       if (rotary->mode & ROT_DEBE)
+               writew(rotary->debounce & DPRESCALE,
+                       rotary->base + CNT_DEBOUNCE_OFF);
+
+       writew(rotary->mode & ~CNTE, rotary->base + CNT_CONFIG_OFF);
+
+       val = UCIE | DCIE;
+       if (rotary->button_key)
+               val |= CZMIE;
+       writew(val, rotary->base + CNT_IMASK_OFF);
+
+       writew(rotary->mode | CNTE, rotary->base + CNT_CONFIG_OFF);
+
+       return 0;
+}
+
+static void bfin_rotary_close(struct input_dev *input)
+{
+       struct bfin_rot *rotary = input_get_drvdata(input);
+
+       writew(0, rotary->base + CNT_CONFIG_OFF);
+       writew(0, rotary->base + CNT_IMASK_OFF);
+}
+
+static void bfin_rotary_free_action(void *data)
+{
+       peripheral_free_list(data);
+}
+
 static int bfin_rotary_probe(struct platform_device *pdev)
 {
-       struct bfin_rotary_platform_data *pdata = dev_get_platdata(&pdev->dev);
+       struct device *dev = &pdev->dev;
+       const struct bfin_rotary_platform_data *pdata = dev_get_platdata(dev);
        struct bfin_rot *rotary;
+       struct resource *res;
        struct input_dev *input;
        int error;
 
@@ -102,18 +145,37 @@ static int bfin_rotary_probe(struct platform_device *pdev)
                return -EINVAL;
        }
 
-       error = peripheral_request_list(per_cnt, dev_name(&pdev->dev));
-       if (error) {
-               dev_err(&pdev->dev, "requesting peripherals failed\n");
-               return error;
+       if (pdata->pin_list) {
+               error = peripheral_request_list(pdata->pin_list,
+                                               dev_name(&pdev->dev));
+               if (error) {
+                       dev_err(dev, "requesting peripherals failed: %d\n",
+                               error);
+                       return error;
+               }
+
+               error = devm_add_action(dev, bfin_rotary_free_action,
+                                       pdata->pin_list);
+               if (error) {
+                       dev_err(dev, "setting cleanup action failed: %d\n",
+                               error);
+                       peripheral_free_list(pdata->pin_list);
+                       return error;
+               }
        }
 
-       rotary = kzalloc(sizeof(struct bfin_rot), GFP_KERNEL);
-       input = input_allocate_device();
-       if (!rotary || !input) {
-               error = -ENOMEM;
-               goto out1;
-       }
+       rotary = devm_kzalloc(dev, sizeof(struct bfin_rot), GFP_KERNEL);
+       if (!rotary)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       rotary->base = devm_ioremap_resource(dev, res);
+       if (IS_ERR(rotary->base))
+               return PTR_ERR(rotary->base);
+
+       input = devm_input_allocate_device(dev);
+       if (!input)
+               return -ENOMEM;
 
        rotary->input = input;
 
@@ -122,9 +184,8 @@ static int bfin_rotary_probe(struct platform_device *pdev)
        rotary->button_key = pdata->rotary_button_key;
        rotary->rel_code = pdata->rotary_rel_code;
 
-       error = rotary->irq = platform_get_irq(pdev, 0);
-       if (error < 0)
-               goto out1;
+       rotary->mode = pdata->mode;
+       rotary->debounce = pdata->debounce;
 
        input->name = pdev->name;
        input->phys = "bfin-rotary/input0";
@@ -137,6 +198,9 @@ static int bfin_rotary_probe(struct platform_device *pdev)
        input->id.product = 0x0001;
        input->id.version = 0x0100;
 
+       input->open = bfin_rotary_open;
+       input->close = bfin_rotary_close;
+
        if (rotary->up_key) {
                __set_bit(EV_KEY, input->evbit);
                __set_bit(rotary->up_key, input->keybit);
@@ -151,75 +215,43 @@ static int bfin_rotary_probe(struct platform_device *pdev)
                __set_bit(rotary->button_key, input->keybit);
        }
 
-       error = request_irq(rotary->irq, bfin_rotary_isr,
-                           0, dev_name(&pdev->dev), pdev);
+       /* Quiesce the device before requesting irq */
+       bfin_rotary_close(input);
+
+       rotary->irq = platform_get_irq(pdev, 0);
+       if (rotary->irq < 0) {
+               dev_err(dev, "No rotary IRQ specified\n");
+               return -ENOENT;
+       }
+
+       error = devm_request_irq(dev, rotary->irq, bfin_rotary_isr,
+                                0, dev_name(dev), rotary);
        if (error) {
-               dev_err(&pdev->dev,
-                       "unable to claim irq %d; error %d\n",
+               dev_err(dev, "unable to claim irq %d; error %d\n",
                        rotary->irq, error);
-               goto out1;
+               return error;
        }
 
        error = input_register_device(input);
        if (error) {
-               dev_err(&pdev->dev,
-                       "unable to register input device (%d)\n", error);
-               goto out2;
+               dev_err(dev, "unable to register input device (%d)\n", error);
+               return error;
        }
 
-       if (pdata->rotary_button_key)
-               bfin_write_CNT_IMASK(CZMIE);
-
-       if (pdata->mode & ROT_DEBE)
-               bfin_write_CNT_DEBOUNCE(pdata->debounce & DPRESCALE);
-
-       if (pdata->mode)
-               bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() |
-                                       (pdata->mode & ~CNTE));
-
-       bfin_write_CNT_IMASK(bfin_read_CNT_IMASK() | UCIE | DCIE);
-       bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() | CNTE);
-
        platform_set_drvdata(pdev, rotary);
        device_init_wakeup(&pdev->dev, 1);
 
        return 0;
-
-out2:
-       free_irq(rotary->irq, pdev);
-out1:
-       input_free_device(input);
-       kfree(rotary);
-       peripheral_free_list(per_cnt);
-
-       return error;
 }
 
-static int bfin_rotary_remove(struct platform_device *pdev)
-{
-       struct bfin_rot *rotary = platform_get_drvdata(pdev);
-
-       bfin_write_CNT_CONFIG(0);
-       bfin_write_CNT_IMASK(0);
-
-       free_irq(rotary->irq, pdev);
-       input_unregister_device(rotary->input);
-       peripheral_free_list(per_cnt);
-
-       kfree(rotary);
-
-       return 0;
-}
-
-#ifdef CONFIG_PM
-static int bfin_rotary_suspend(struct device *dev)
+static int __maybe_unused bfin_rotary_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct bfin_rot *rotary = platform_get_drvdata(pdev);
 
-       rotary->cnt_config = bfin_read_CNT_CONFIG();
-       rotary->cnt_imask = bfin_read_CNT_IMASK();
-       rotary->cnt_debounce = bfin_read_CNT_DEBOUNCE();
+       rotary->cnt_config = readw(rotary->base + CNT_CONFIG_OFF);
+       rotary->cnt_imask = readw(rotary->base + CNT_IMASK_OFF);
+       rotary->cnt_debounce = readw(rotary->base + CNT_DEBOUNCE_OFF);
 
        if (device_may_wakeup(&pdev->dev))
                enable_irq_wake(rotary->irq);
@@ -227,38 +259,32 @@ static int bfin_rotary_suspend(struct device *dev)
        return 0;
 }
 
-static int bfin_rotary_resume(struct device *dev)
+static int __maybe_unused bfin_rotary_resume(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
        struct bfin_rot *rotary = platform_get_drvdata(pdev);
 
-       bfin_write_CNT_DEBOUNCE(rotary->cnt_debounce);
-       bfin_write_CNT_IMASK(rotary->cnt_imask);
-       bfin_write_CNT_CONFIG(rotary->cnt_config & ~CNTE);
+       writew(rotary->cnt_debounce, rotary->base + CNT_DEBOUNCE_OFF);
+       writew(rotary->cnt_imask, rotary->base + CNT_IMASK_OFF);
+       writew(rotary->cnt_config & ~CNTE, rotary->base + CNT_CONFIG_OFF);
 
        if (device_may_wakeup(&pdev->dev))
                disable_irq_wake(rotary->irq);
 
        if (rotary->cnt_config & CNTE)
-               bfin_write_CNT_CONFIG(rotary->cnt_config);
+               writew(rotary->cnt_config, rotary->base + CNT_CONFIG_OFF);
 
        return 0;
 }
 
-static const struct dev_pm_ops bfin_rotary_pm_ops = {
-       .suspend        = bfin_rotary_suspend,
-       .resume         = bfin_rotary_resume,
-};
-#endif
+static SIMPLE_DEV_PM_OPS(bfin_rotary_pm_ops,
+                        bfin_rotary_suspend, bfin_rotary_resume);
 
 static struct platform_driver bfin_rotary_device_driver = {
        .probe          = bfin_rotary_probe,
-       .remove         = bfin_rotary_remove,
        .driver         = {
                .name   = "bfin-rotary",
-#ifdef CONFIG_PM
                .pm     = &bfin_rotary_pm_ops,
-#endif
        },
 };
 module_platform_driver(bfin_rotary_device_driver);
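
The bfin_rotary rework is essentially a devm conversion: memory, the MMIO mapping, the input device and the IRQ are all obtained through managed helpers, so the hand-rolled error unwinding and the whole remove() callback go away, and the one resource devm cannot track directly (the pin list) is released through devm_add_action(). A hedged sketch of that probe shape with the driver specifics stripped out (foo_probe/foo_isr are placeholders, not the driver's real functions):

	#include <linux/platform_device.h>
	#include <linux/interrupt.h>
	#include <linux/io.h>

	static irqreturn_t foo_isr(int irq, void *dev_id)
	{
		return IRQ_HANDLED;	/* placeholder handler */
	}

	static int foo_probe(struct platform_device *pdev)
	{
		struct device *dev = &pdev->dev;
		struct resource *res;
		void __iomem *base;
		int irq, error;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		base = devm_ioremap_resource(dev, res);
		if (IS_ERR(base))
			return PTR_ERR(base);

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		error = devm_request_irq(dev, irq, foo_isr, 0, dev_name(dev), NULL);
		if (error)
			return error;

		return 0;	/* no remove(): devm releases everything on unbind */
	}
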
index 79cc0f79896fcb47e9ed31faeed02339ffe5ccb4..e8e010a85484ae3d42003bc3c0742f909b8946f6 100644 (file)
@@ -195,7 +195,7 @@ static int soc_button_probe(struct platform_device *pdev)
 
 static struct soc_button_info soc_button_PNP0C40[] = {
        { "power", 0, EV_KEY, KEY_POWER, false, true },
-       { "home", 1, EV_KEY, KEY_HOME, false, true },
+       { "home", 1, EV_KEY, KEY_LEFTMETA, false, true },
        { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false },
        { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false },
        { "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false },
index f205b8be2ce4ecd2395c75dc8fc990513f3fff48..d28726a0ef858e252948d2e5f2fd009bfd6c5506 100644 (file)
@@ -99,36 +99,58 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
 #define ALPS_FOUR_BUTTONS      0x40    /* 4 direction button present */
 #define ALPS_PS2_INTERLEAVED   0x80    /* 3-byte PS/2 packet interleaved with
                                           6-byte ALPS packet */
-#define ALPS_IS_RUSHMORE       0x100   /* device is a rushmore */
 #define ALPS_BUTTONPAD         0x200   /* device is a clickpad */
 
 static const struct alps_model_info alps_model_data[] = {
-       { { 0x32, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },  /* Toshiba Salellite Pro M10 */
-       { { 0x33, 0x02, 0x0a }, 0x00, ALPS_PROTO_V1, 0x88, 0xf8, 0 },                           /* UMAX-530T */
-       { { 0x53, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x53, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x60, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },                           /* HP ze1115 */
-       { { 0x63, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x63, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x63, 0x02, 0x28 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 },                /* Fujitsu Siemens S6010 */
-       { { 0x63, 0x02, 0x3c }, 0x00, ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL },                  /* Toshiba Satellite S2400-103 */
-       { { 0x63, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 },                /* NEC Versa L320 */
-       { { 0x63, 0x02, 0x64 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x63, 0x03, 0xc8 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },  /* Dell Latitude D800 */
-       { { 0x73, 0x00, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT },              /* ThinkPad R61 8918-5QG */
-       { { 0x73, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, 0 },
-       { { 0x73, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 },                /* Ahtec Laptop */
-       { { 0x20, 0x02, 0x0e }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },  /* XXX */
-       { { 0x22, 0x02, 0x0a }, 0x00, ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT },
-       { { 0x22, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT },  /* Dell Latitude D600 */
+       { { 0x32, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },      /* Toshiba Salellite Pro M10 */
+       { { 0x33, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V1, 0x88, 0xf8, 0 } },                               /* UMAX-530T */
+       { { 0x53, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x53, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x60, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },                               /* HP ze1115 */
+       { { 0x63, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x63, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x63, 0x02, 0x28 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } },            /* Fujitsu Siemens S6010 */
+       { { 0x63, 0x02, 0x3c }, 0x00, { ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL } },              /* Toshiba Satellite S2400-103 */
+       { { 0x63, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 } },            /* NEC Versa L320 */
+       { { 0x63, 0x02, 0x64 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x63, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },      /* Dell Latitude D800 */
+       { { 0x73, 0x00, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT } },          /* ThinkPad R61 8918-5QG */
+       { { 0x73, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } },
+       { { 0x73, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } },            /* Ahtec Laptop */
+
+       /*
+        * XXX This entry is suspicious. First byte has zero lower nibble,
+        * which is what a normal mouse would report. Also, the value 0x0e
+        * isn't valid per PS/2 spec.
+        */
+       { { 0x20, 0x02, 0x0e }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },
+
+       { { 0x22, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } },
+       { { 0x22, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT } },      /* Dell Latitude D600 */
        /* Dell Latitude E5500, E6400, E6500, Precision M4400 */
-       { { 0x62, 0x02, 0x14 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf,
-               ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },
-       { { 0x73, 0x00, 0x14 }, 0x00, ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT },              /* Dell XT2 */
-       { { 0x73, 0x02, 0x50 }, 0x00, ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS },           /* Dell Vostro 1400 */
-       { { 0x52, 0x01, 0x14 }, 0x00, ALPS_PROTO_V2, 0xff, 0xff,
-               ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED },                            /* Toshiba Tecra A11-11L */
-       { { 0x73, 0x02, 0x64 }, 0x8a, ALPS_PROTO_V4, 0x8f, 0x8f, 0 },
+       { { 0x62, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf,
+               ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } },
+       { { 0x73, 0x00, 0x14 }, 0x00, { ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT } },          /* Dell XT2 */
+       { { 0x73, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS } },       /* Dell Vostro 1400 */
+       { { 0x52, 0x01, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff,
+               ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } },                          /* Toshiba Tecra A11-11L */
+       { { 0x73, 0x02, 0x64 }, 0x8a, { ALPS_PROTO_V4, 0x8f, 0x8f, 0 } },
+};
+
+static const struct alps_protocol_info alps_v3_protocol_data = {
+       ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT
+};
+
+static const struct alps_protocol_info alps_v3_rushmore_data = {
+       ALPS_PROTO_V3_RUSHMORE, 0x8f, 0x8f, ALPS_DUALPOINT
+};
+
+static const struct alps_protocol_info alps_v5_protocol_data = {
+       ALPS_PROTO_V5, 0xc8, 0xd8, 0
+};
+
+static const struct alps_protocol_info alps_v7_protocol_data = {
+       ALPS_PROTO_V7, 0x48, 0x48, ALPS_DUALPOINT
 };
 
 static void alps_set_abs_params_st(struct alps_data *priv,
@@ -136,12 +158,6 @@ static void alps_set_abs_params_st(struct alps_data *priv,
 static void alps_set_abs_params_mt(struct alps_data *priv,
                                   struct input_dev *dev1);
 
-/*
- * XXX - this entry is suspicious. First byte has zero lower nibble,
- * which is what a normal mouse would report. Also, the value 0x0e
- * isn't valid per PS/2 spec.
- */
-
 /* Packet formats are described in Documentation/input/alps.txt */
 
 static bool alps_is_valid_first_byte(struct alps_data *priv,
@@ -150,8 +166,7 @@ static bool alps_is_valid_first_byte(struct alps_data *priv,
        return (data & priv->mask0) == priv->byte0;
 }
 
-static void alps_report_buttons(struct psmouse *psmouse,
-                               struct input_dev *dev1, struct input_dev *dev2,
+static void alps_report_buttons(struct input_dev *dev1, struct input_dev *dev2,
                                int left, int right, int middle)
 {
        struct input_dev *dev;
@@ -161,20 +176,21 @@ static void alps_report_buttons(struct psmouse *psmouse,
         * other device (dev2) then this event should be also
         * sent through that device.
         */
-       dev = test_bit(BTN_LEFT, dev2->key) ? dev2 : dev1;
+       dev = (dev2 && test_bit(BTN_LEFT, dev2->key)) ? dev2 : dev1;
        input_report_key(dev, BTN_LEFT, left);
 
-       dev = test_bit(BTN_RIGHT, dev2->key) ? dev2 : dev1;
+       dev = (dev2 && test_bit(BTN_RIGHT, dev2->key)) ? dev2 : dev1;
        input_report_key(dev, BTN_RIGHT, right);
 
-       dev = test_bit(BTN_MIDDLE, dev2->key) ? dev2 : dev1;
+       dev = (dev2 && test_bit(BTN_MIDDLE, dev2->key)) ? dev2 : dev1;
        input_report_key(dev, BTN_MIDDLE, middle);
 
        /*
         * Sync the _other_ device now, we'll do the first
         * device later once we report the rest of the events.
         */
-       input_sync(dev2);
+       if (dev2)
+               input_sync(dev2);
 }
 
 static void alps_process_packet_v1_v2(struct psmouse *psmouse)
@@ -221,13 +237,13 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
                input_report_rel(dev2, REL_X,  (x > 383 ? (x - 768) : x));
                input_report_rel(dev2, REL_Y, -(y > 255 ? (y - 512) : y));
 
-               alps_report_buttons(psmouse, dev2, dev, left, right, middle);
+               alps_report_buttons(dev2, dev, left, right, middle);
 
                input_sync(dev2);
                return;
        }
 
-       alps_report_buttons(psmouse, dev, dev2, left, right, middle);
+       alps_report_buttons(dev, dev2, left, right, middle);
 
        /* Convert hardware tap to a reasonable Z value */
        if (ges && !fin)
@@ -412,7 +428,7 @@ static int alps_process_bitmap(struct alps_data *priv,
                (2 * (priv->y_bits - 1));
 
        /* y-bitmap order is reversed, except on rushmore */
-       if (!(priv->flags & ALPS_IS_RUSHMORE)) {
+       if (priv->proto_version != ALPS_PROTO_V3_RUSHMORE) {
                fields->mt[0].y = priv->y_max - fields->mt[0].y;
                fields->mt[1].y = priv->y_max - fields->mt[1].y;
        }
@@ -648,7 +664,8 @@ static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse)
                 */
                if (f->is_mp) {
                        fingers = f->fingers;
-                       if (priv->proto_version == ALPS_PROTO_V3) {
+                       if (priv->proto_version == ALPS_PROTO_V3 ||
+                           priv->proto_version == ALPS_PROTO_V3_RUSHMORE) {
                                if (alps_process_bitmap(priv, f) == 0)
                                        fingers = 0; /* Use st data */
 
@@ -892,34 +909,6 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
                                          unsigned char *pkt,
                                          unsigned char pkt_id)
 {
-       /*
-        *       packet-fmt    b7   b6    b5   b4   b3   b2   b1   b0
-        * Byte0 TWO & MULTI    L    1     R    M    1 Y0-2 Y0-1 Y0-0
-        * Byte0 NEW            L    1  X1-5    1    1 Y0-2 Y0-1 Y0-0
-        * Byte1            Y0-10 Y0-9  Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
-        * Byte2            X0-11    1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
-        * Byte3            X1-11    1  X0-4 X0-3    1 X0-2 X0-1 X0-0
-        * Byte4 TWO        X1-10  TWO  X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
-        * Byte4 MULTI      X1-10  TWO  X1-9 X1-8 X1-7 X1-6 Y1-5    1
-        * Byte4 NEW        X1-10  TWO  X1-9 X1-8 X1-7 X1-6    0    0
-        * Byte5 TWO & NEW  Y1-10    0  Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
-        * Byte5 MULTI      Y1-10    0  Y1-9 Y1-8 Y1-7 Y1-6  F-1  F-0
-        * L:         Left button
-        * R / M:     Non-clickpads: Right / Middle button
-        *            Clickpads: When > 2 fingers are down, and some fingers
-        *            are in the button area, then the 2 coordinates reported
-        *            are for fingers outside the button area and these report
-        *            extra fingers being present in the right / left button
-        *            area. Note these fingers are not added to the F field!
-        *            so if a TWO packet is received and R = 1 then there are
-        *            3 fingers down, etc.
-        * TWO:       1: Two touches present, byte 0/4/5 are in TWO fmt
-        *            0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
-        *               otherwise byte 0 bit 4 must be set and byte 0/4/5 are
-        *               in NEW fmt
-        * F:         Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
-        */
-
        mt[0].x = ((pkt[2] & 0x80) << 4);
        mt[0].x |= ((pkt[2] & 0x3F) << 5);
        mt[0].x |= ((pkt[3] & 0x30) >> 1);
@@ -1044,17 +1033,6 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
                return;
        }
 
-       /*
-        *        b7 b6 b5 b4 b3 b2 b1 b0
-        * Byte0   0  1  0  0  1  0  0  0
-        * Byte1   1  1  *  *  1  M  R  L
-        * Byte2  X7  1 X5 X4 X3 X2 X1 X0
-        * Byte3  Z6  1 Y6 X6  1 Y2 Y1 Y0
-        * Byte4  Y7  0 Y5 Y4 Y3  1  1  0
-        * Byte5 T&P  0 Z5 Z4 Z3 Z2 Z1 Z0
-        * M / R / L: Middle / Right / Left button
-        */
-
        x = ((packet[2] & 0xbf)) | ((packet[3] & 0x10) << 2);
        y = (packet[3] & 0x07) | (packet[4] & 0xb8) |
            ((packet[3] & 0x20) << 1);
@@ -1107,23 +1085,89 @@ static void alps_process_packet_v7(struct psmouse *psmouse)
                alps_process_touchpad_packet_v7(psmouse);
 }
 
-static void alps_report_bare_ps2_packet(struct psmouse *psmouse,
+static DEFINE_MUTEX(alps_mutex);
+
+static void alps_register_bare_ps2_mouse(struct work_struct *work)
+{
+       struct alps_data *priv =
+               container_of(work, struct alps_data, dev3_register_work.work);
+       struct psmouse *psmouse = priv->psmouse;
+       struct input_dev *dev3;
+       int error = 0;
+
+       mutex_lock(&alps_mutex);
+
+       if (priv->dev3)
+               goto out;
+
+       dev3 = input_allocate_device();
+       if (!dev3) {
+               psmouse_err(psmouse, "failed to allocate secondary device\n");
+               error = -ENOMEM;
+               goto out;
+       }
+
+       snprintf(priv->phys3, sizeof(priv->phys3), "%s/%s",
+                psmouse->ps2dev.serio->phys,
+                (priv->dev2 ? "input2" : "input1"));
+       dev3->phys = priv->phys3;
+
+       /*
+        * format of input device name is: "protocol vendor name"
+        * see function psmouse_switch_protocol() in psmouse-base.c
+        */
+       dev3->name = "PS/2 ALPS Mouse";
+
+       dev3->id.bustype = BUS_I8042;
+       dev3->id.vendor  = 0x0002;
+       dev3->id.product = PSMOUSE_PS2;
+       dev3->id.version = 0x0000;
+       dev3->dev.parent = &psmouse->ps2dev.serio->dev;
+
+       input_set_capability(dev3, EV_REL, REL_X);
+       input_set_capability(dev3, EV_REL, REL_Y);
+       input_set_capability(dev3, EV_KEY, BTN_LEFT);
+       input_set_capability(dev3, EV_KEY, BTN_RIGHT);
+       input_set_capability(dev3, EV_KEY, BTN_MIDDLE);
+
+       __set_bit(INPUT_PROP_POINTER, dev3->propbit);
+
+       error = input_register_device(dev3);
+       if (error) {
+               psmouse_err(psmouse,
+                           "failed to register secondary device: %d\n",
+                           error);
+               input_free_device(dev3);
+               goto out;
+       }
+
+       priv->dev3 = dev3;
+
+out:
+       /*
+        * Save the error code so that we can detect that we
+        * already tried to create the device.
+        */
+       if (error)
+               priv->dev3 = ERR_PTR(error);
+
+       mutex_unlock(&alps_mutex);
+}
+
+static void alps_report_bare_ps2_packet(struct input_dev *dev,
                                        unsigned char packet[],
                                        bool report_buttons)
 {
-       struct alps_data *priv = psmouse->private;
-       struct input_dev *dev2 = priv->dev2;
-
        if (report_buttons)
-               alps_report_buttons(psmouse, dev2, psmouse->dev,
+               alps_report_buttons(dev, NULL,
                                packet[0] & 1, packet[0] & 2, packet[0] & 4);
 
-       input_report_rel(dev2, REL_X,
+       input_report_rel(dev, REL_X,
                packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0);
-       input_report_rel(dev2, REL_Y,
+       input_report_rel(dev, REL_Y,
                packet[2] ? ((packet[0] << 3) & 0x100) - packet[2] : 0);
 
-       input_sync(dev2);
+       input_sync(dev);
 }
 
 static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
@@ -1188,8 +1232,8 @@ static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
                 * de-synchronization.
                 */
 
-               alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
-                                           false);
+               alps_report_bare_ps2_packet(priv->dev2,
+                                           &psmouse->packet[3], false);
 
                /*
                 * Continue with the standard ALPS protocol handling,
@@ -1245,9 +1289,18 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
         * properly we only do this if the device is fully synchronized.
         */
        if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
+
+               /* Register dev3 mouse if we received PS/2 packet first time */
+               if (unlikely(!priv->dev3))
+                       psmouse_queue_work(psmouse,
+                                          &priv->dev3_register_work, 0);
+
                if (psmouse->pktcnt == 3) {
-                       alps_report_bare_ps2_packet(psmouse, psmouse->packet,
-                                                   true);
+                       /* Once dev3 mouse device is registered report data */
+                       if (likely(!IS_ERR_OR_NULL(priv->dev3)))
+                               alps_report_bare_ps2_packet(priv->dev3,
+                                                           psmouse->packet,
+                                                           true);
                        return PSMOUSE_FULL_PACKET;
                }
                return PSMOUSE_GOOD_DATA;
@@ -1275,7 +1328,7 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
                            psmouse->pktcnt - 1,
                            psmouse->packet[psmouse->pktcnt - 1]);
 
-               if (priv->proto_version == ALPS_PROTO_V3 &&
+               if (priv->proto_version == ALPS_PROTO_V3_RUSHMORE &&
                    psmouse->pktcnt == psmouse->pktsize) {
                        /*
                         * Some Dell boxes, such as Latitude E6440 or E7440
@@ -1780,7 +1833,7 @@ static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base)
         * all.
         */
        if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_SETSCALE21, param)) {
-               psmouse_warn(psmouse, "trackstick E7 report failed\n");
+               psmouse_warn(psmouse, "Failed to initialize trackstick (E7 report failed)\n");
                ret = -ENODEV;
        } else {
                psmouse_dbg(psmouse, "trackstick E7 report: %3ph\n", param);
@@ -1945,8 +1998,6 @@ static int alps_hw_init_rushmore_v3(struct psmouse *psmouse)
                                                   ALPS_REG_BASE_RUSHMORE);
                if (reg_val == -EIO)
                        goto error;
-               if (reg_val == -ENODEV)
-                       priv->flags &= ~ALPS_DUALPOINT;
        }
 
        if (alps_enter_command_mode(psmouse) ||
@@ -2162,11 +2213,18 @@ error:
        return ret;
 }
 
-static void alps_set_defaults(struct alps_data *priv)
+static int alps_set_protocol(struct psmouse *psmouse,
+                            struct alps_data *priv,
+                            const struct alps_protocol_info *protocol)
 {
-       priv->byte0 = 0x8f;
-       priv->mask0 = 0x8f;
-       priv->flags = ALPS_DUALPOINT;
+       psmouse->private = priv;
+
+       setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
+
+       priv->proto_version = protocol->version;
+       priv->byte0 = protocol->byte0;
+       priv->mask0 = protocol->mask0;
+       priv->flags = protocol->flags;
 
        priv->x_max = 2000;
        priv->y_max = 1400;
@@ -2182,6 +2240,7 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->x_max = 1023;
                priv->y_max = 767;
                break;
+
        case ALPS_PROTO_V3:
                priv->hw_init = alps_hw_init_v3;
                priv->process_packet = alps_process_packet_v3;
@@ -2190,6 +2249,23 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->nibble_commands = alps_v3_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
                break;
+
+       case ALPS_PROTO_V3_RUSHMORE:
+               priv->hw_init = alps_hw_init_rushmore_v3;
+               priv->process_packet = alps_process_packet_v3;
+               priv->set_abs_params = alps_set_abs_params_mt;
+               priv->decode_fields = alps_decode_rushmore;
+               priv->nibble_commands = alps_v3_nibble_commands;
+               priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
+               priv->x_bits = 16;
+               priv->y_bits = 12;
+
+               if (alps_probe_trackstick_v3(psmouse,
+                                            ALPS_REG_BASE_RUSHMORE) < 0)
+                       priv->flags &= ~ALPS_DUALPOINT;
+
+               break;
+
        case ALPS_PROTO_V4:
                priv->hw_init = alps_hw_init_v4;
                priv->process_packet = alps_process_packet_v4;
@@ -2197,6 +2273,7 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->nibble_commands = alps_v4_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_DISABLE;
                break;
+
        case ALPS_PROTO_V5:
                priv->hw_init = alps_hw_init_dolphin_v1;
                priv->process_packet = alps_process_touchpad_packet_v3_v5;
@@ -2204,14 +2281,12 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->set_abs_params = alps_set_abs_params_mt;
                priv->nibble_commands = alps_v3_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
-               priv->byte0 = 0xc8;
-               priv->mask0 = 0xd8;
-               priv->flags = 0;
                priv->x_max = 1360;
                priv->y_max = 660;
                priv->x_bits = 23;
                priv->y_bits = 12;
                break;
+
        case ALPS_PROTO_V6:
                priv->hw_init = alps_hw_init_v6;
                priv->process_packet = alps_process_packet_v6;
@@ -2220,6 +2295,7 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->x_max = 2047;
                priv->y_max = 1535;
                break;
+
        case ALPS_PROTO_V7:
                priv->hw_init = alps_hw_init_v7;
                priv->process_packet = alps_process_packet_v7;
@@ -2227,19 +2303,21 @@ static void alps_set_defaults(struct alps_data *priv)
                priv->set_abs_params = alps_set_abs_params_mt;
                priv->nibble_commands = alps_v3_nibble_commands;
                priv->addr_command = PSMOUSE_CMD_RESET_WRAP;
-               priv->x_max = 0xfff;
-               priv->y_max = 0x7ff;
-               priv->byte0 = 0x48;
-               priv->mask0 = 0x48;
+
+               if (alps_dolphin_get_device_area(psmouse, priv))
+                       return -EIO;
 
                if (priv->fw_ver[1] != 0xba)
                        priv->flags |= ALPS_BUTTONPAD;
+
                break;
        }
+
+       return 0;
 }
 
-static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv,
-                           unsigned char *e7, unsigned char *ec)
+static const struct alps_protocol_info *alps_match_table(unsigned char *e7,
+                                                        unsigned char *ec)
 {
        const struct alps_model_info *model;
        int i;
@@ -2251,23 +2329,18 @@ static int alps_match_table(struct psmouse *psmouse, struct alps_data *priv,
                    (!model->command_mode_resp ||
                     model->command_mode_resp == ec[2])) {
 
-                       priv->proto_version = model->proto_version;
-                       alps_set_defaults(priv);
-
-                       priv->flags = model->flags;
-                       priv->byte0 = model->byte0;
-                       priv->mask0 = model->mask0;
-
-                       return 0;
+                       return &model->protocol_info;
                }
        }
 
-       return -EINVAL;
+       return NULL;
 }
 
 static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
 {
+       const struct alps_protocol_info *protocol;
        unsigned char e6[4], e7[4], ec[4];
+       int error;
 
        /*
         * First try "E6 report".
@@ -2293,54 +2366,35 @@ static int alps_identify(struct psmouse *psmouse, struct alps_data *priv)
            alps_exit_command_mode(psmouse))
                return -EIO;
 
-       /* Save the Firmware version */
-       memcpy(priv->fw_ver, ec, 3);
-
-       if (alps_match_table(psmouse, priv, e7, ec) == 0) {
-               return 0;
-       } else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
-                  ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) {
-               priv->proto_version = ALPS_PROTO_V5;
-               alps_set_defaults(priv);
-               if (alps_dolphin_get_device_area(psmouse, priv))
-                       return -EIO;
-               else
-                       return 0;
-       } else if (ec[0] == 0x88 &&
-                  ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) {
-               priv->proto_version = ALPS_PROTO_V7;
-               alps_set_defaults(priv);
-
-               return 0;
-       } else if (ec[0] == 0x88 && ec[1] == 0x08) {
-               priv->proto_version = ALPS_PROTO_V3;
-               alps_set_defaults(priv);
-
-               priv->hw_init = alps_hw_init_rushmore_v3;
-               priv->decode_fields = alps_decode_rushmore;
-               priv->x_bits = 16;
-               priv->y_bits = 12;
-               priv->flags |= ALPS_IS_RUSHMORE;
-
-               /* hack to make addr_command, nibble_command available */
-               psmouse->private = priv;
-
-               if (alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_RUSHMORE))
-                       priv->flags &= ~ALPS_DUALPOINT;
-
-               return 0;
-       } else if (ec[0] == 0x88 && ec[1] == 0x07 &&
-                  ec[2] >= 0x90 && ec[2] <= 0x9d) {
-               priv->proto_version = ALPS_PROTO_V3;
-               alps_set_defaults(priv);
-
-               return 0;
+       protocol = alps_match_table(e7, ec);
+       if (!protocol) {
+               if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 &&
+                          ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) {
+                       protocol = &alps_v5_protocol_data;
+               } else if (ec[0] == 0x88 &&
+                          ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) {
+                       protocol = &alps_v7_protocol_data;
+               } else if (ec[0] == 0x88 && ec[1] == 0x08) {
+                       protocol = &alps_v3_rushmore_data;
+               } else if (ec[0] == 0x88 && ec[1] == 0x07 &&
+                          ec[2] >= 0x90 && ec[2] <= 0x9d) {
+                       protocol = &alps_v3_protocol_data;
+               } else {
+                       psmouse_dbg(psmouse,
+                                   "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
+                       return -EINVAL;
+               }
        }
 
-       psmouse_dbg(psmouse,
-                   "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec);
+       if (priv) {
+               /* Save the Firmware version */
+               memcpy(priv->fw_ver, ec, 3);
+               error = alps_set_protocol(psmouse, priv, protocol);
+               if (error)
+                       return error;
+       }
 
-       return -EINVAL;
+       return 0;
 }
 
 static int alps_reconnect(struct psmouse *psmouse)
@@ -2361,7 +2415,10 @@ static void alps_disconnect(struct psmouse *psmouse)
 
        psmouse_reset(psmouse);
        del_timer_sync(&priv->timer);
-       input_unregister_device(priv->dev2);
+       if (priv->dev2)
+               input_unregister_device(priv->dev2);
+       if (!IS_ERR_OR_NULL(priv->dev3))
+               input_unregister_device(priv->dev3);
        kfree(priv);
 }
 
@@ -2394,25 +2451,12 @@ static void alps_set_abs_params_mt(struct alps_data *priv,
 
 int alps_init(struct psmouse *psmouse)
 {
-       struct alps_data *priv;
-       struct input_dev *dev1 = psmouse->dev, *dev2;
-
-       priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
-       dev2 = input_allocate_device();
-       if (!priv || !dev2)
-               goto init_fail;
-
-       priv->dev2 = dev2;
-       setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse);
-
-       psmouse->private = priv;
-
-       psmouse_reset(psmouse);
-
-       if (alps_identify(psmouse, priv) < 0)
-               goto init_fail;
+       struct alps_data *priv = psmouse->private;
+       struct input_dev *dev1 = psmouse->dev;
+       int error;
 
-       if (priv->hw_init(psmouse))
+       error = priv->hw_init(psmouse);
+       if (error)
                goto init_fail;
 
        /*
@@ -2462,36 +2506,57 @@ int alps_init(struct psmouse *psmouse)
        }
 
        if (priv->flags & ALPS_DUALPOINT) {
+               struct input_dev *dev2;
+
+               dev2 = input_allocate_device();
+               if (!dev2) {
+                       psmouse_err(psmouse,
+                                   "failed to allocate trackstick device\n");
+                       error = -ENOMEM;
+                       goto init_fail;
+               }
+
+               snprintf(priv->phys2, sizeof(priv->phys2), "%s/input1",
+                        psmouse->ps2dev.serio->phys);
+               dev2->phys = priv->phys2;
+
                /*
                 * format of input device name is: "protocol vendor name"
                 * see function psmouse_switch_protocol() in psmouse-base.c
                 */
                dev2->name = "AlpsPS/2 ALPS DualPoint Stick";
+
+               dev2->id.bustype = BUS_I8042;
+               dev2->id.vendor  = 0x0002;
                dev2->id.product = PSMOUSE_ALPS;
                dev2->id.version = priv->proto_version;
-       } else {
-               dev2->name = "PS/2 ALPS Mouse";
-               dev2->id.product = PSMOUSE_PS2;
-               dev2->id.version = 0x0000;
-       }
+               dev2->dev.parent = &psmouse->ps2dev.serio->dev;
 
-       snprintf(priv->phys, sizeof(priv->phys), "%s/input1", psmouse->ps2dev.serio->phys);
-       dev2->phys = priv->phys;
-       dev2->id.bustype = BUS_I8042;
-       dev2->id.vendor  = 0x0002;
-       dev2->dev.parent = &psmouse->ps2dev.serio->dev;
+               input_set_capability(dev2, EV_REL, REL_X);
+               input_set_capability(dev2, EV_REL, REL_Y);
+               input_set_capability(dev2, EV_KEY, BTN_LEFT);
+               input_set_capability(dev2, EV_KEY, BTN_RIGHT);
+               input_set_capability(dev2, EV_KEY, BTN_MIDDLE);
 
-       dev2->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
-       dev2->relbit[BIT_WORD(REL_X)] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);
-       dev2->keybit[BIT_WORD(BTN_LEFT)] =
-               BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) | BIT_MASK(BTN_RIGHT);
-
-       __set_bit(INPUT_PROP_POINTER, dev2->propbit);
-       if (priv->flags & ALPS_DUALPOINT)
+               __set_bit(INPUT_PROP_POINTER, dev2->propbit);
                __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit);
 
-       if (input_register_device(priv->dev2))
-               goto init_fail;
+               error = input_register_device(dev2);
+               if (error) {
+                       psmouse_err(psmouse,
+                                   "failed to register trackstick device: %d\n",
+                                   error);
+                       input_free_device(dev2);
+                       goto init_fail;
+               }
+
+               priv->dev2 = dev2;
+       }
+
+       priv->psmouse = psmouse;
+
+       INIT_DELAYED_WORK(&priv->dev3_register_work,
+                         alps_register_bare_ps2_mouse);
 
        psmouse->protocol_handler = alps_process_byte;
        psmouse->poll = alps_poll;
@@ -2509,25 +2574,56 @@ int alps_init(struct psmouse *psmouse)
 
 init_fail:
        psmouse_reset(psmouse);
-       input_free_device(dev2);
-       kfree(priv);
+       /*
+        * Even though we did not allocate psmouse->private we do free
+        * it here.
+        */
+       kfree(psmouse->private);
        psmouse->private = NULL;
-       return -1;
+       return error;
 }
 
 int alps_detect(struct psmouse *psmouse, bool set_properties)
 {
-       struct alps_data dummy;
+       struct alps_data *priv;
+       int error;
 
-       if (alps_identify(psmouse, &dummy) < 0)
-               return -1;
+       error = alps_identify(psmouse, NULL);
+       if (error)
+               return error;
+
+       /*
+        * Reset the device to make sure it is fully operational:
+        * on some laptops, like certain Dell Latitudes, we may
+        * fail to properly detect presence of trackstick if device
+        * has not been reset.
+        */
+       psmouse_reset(psmouse);
+
+       priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       error = alps_identify(psmouse, priv);
+       if (error)
+               return error;
 
        if (set_properties) {
                psmouse->vendor = "ALPS";
-               psmouse->name = dummy.flags & ALPS_DUALPOINT ?
+               psmouse->name = priv->flags & ALPS_DUALPOINT ?
                                "DualPoint TouchPad" : "GlidePoint";
-               psmouse->model = dummy.proto_version << 8;
+               psmouse->model = priv->proto_version;
+       } else {
+               /*
+                * Destroy alps_data structure we allocated earlier since
+                * this was just a "trial run". Otherwise we'll keep it
+                * to be used by alps_init() which has to be called if
+                * we succeed and set_properties is true.
+                */
+               kfree(priv);
+               psmouse->private = NULL;
        }
+
        return 0;
 }
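
The delayed registration above caches a failed attempt in priv->dev3 as an ERR_PTR() so that the expensive registration path is tried at most once, however many packets arrive afterwards. Reduced to its essentials, the idiom looks like the sketch below; struct example_priv, struct example_dev and example_create_dev() are invented names used purely for illustration, while ERR_PTR() and the mutex helpers come from <linux/err.h> and <linux/mutex.h>:

        #include <linux/err.h>
        #include <linux/errno.h>
        #include <linux/mutex.h>

        struct example_dev;                                /* hypothetical device type */
        struct example_priv {
                struct example_dev *dev;                   /* NULL, valid, or ERR_PTR() */
        };

        struct example_dev *example_create_dev(void);      /* hypothetical allocator */

        static DEFINE_MUTEX(example_mutex);

        /* dev is NULL before the first attempt, a valid pointer on success,
         * or an ERR_PTR() error code after a failed attempt. */
        static void example_register_once(struct example_priv *priv)
        {
                struct example_dev *dev;
                int error = 0;

                mutex_lock(&example_mutex);

                if (priv->dev)                             /* created or failed already */
                        goto out;

                dev = example_create_dev();
                if (!dev)
                        error = -ENOMEM;
                else
                        priv->dev = dev;
        out:
                if (error)
                        priv->dev = ERR_PTR(error);        /* remember the failure */
                mutex_unlock(&example_mutex);
        }

Callers can then test the pointer with IS_ERR_OR_NULL() before using it, exactly as alps_process_byte() above does before reporting bare PS/2 packets to dev3.
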
 
index 66240b47819a9569c975cedd1d5443b2b59f6b86..02513c0502fc1309a9f8285ea2f4e3237fd7464a 100644 (file)
 
 #include <linux/input/mt.h>
 
-#define ALPS_PROTO_V1  1
-#define ALPS_PROTO_V2  2
-#define ALPS_PROTO_V3  3
-#define ALPS_PROTO_V4  4
-#define ALPS_PROTO_V5  5
-#define ALPS_PROTO_V6  6
-#define ALPS_PROTO_V7  7       /* t3btl t4s */
+#define ALPS_PROTO_V1          0x100
+#define ALPS_PROTO_V2          0x200
+#define ALPS_PROTO_V3          0x300
+#define ALPS_PROTO_V3_RUSHMORE 0x310
+#define ALPS_PROTO_V4          0x400
+#define ALPS_PROTO_V5          0x500
+#define ALPS_PROTO_V6          0x600
+#define ALPS_PROTO_V7          0x700   /* t3btl t4s */
 
 #define MAX_TOUCHES    2
 
@@ -45,6 +46,21 @@ enum V7_PACKET_ID {
         V7_PACKET_ID_UNKNOWN,
 };
 
+/**
+ * struct alps_protocol_info - information about protocol used by a device
+ * @version: Indicates V1/V2/V3/...
+ * @byte0: Helps figure out whether a position report packet matches the
+ *   known format for this model.  The first byte of the report, ANDed with
+ *   mask0, should match byte0.
+ * @mask0: The mask used to check the first byte of the report.
+ * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
+ */
+struct alps_protocol_info {
+       u16 version;
+       u8 byte0, mask0;
+       unsigned int flags;
+};
+
 /**
  * struct alps_model_info - touchpad ID table
  * @signature: E7 response string to match.
@@ -52,23 +68,16 @@ enum V7_PACKET_ID {
  *   (aka command mode response) identifies the firmware minor version.  This
  *   can be used to distinguish different hardware models which are not
  *   uniquely identifiable through their E7 responses.
- * @proto_version: Indicates V1/V2/V3/...
- * @byte0: Helps figure out whether a position report packet matches the
- *   known format for this model.  The first byte of the report, ANDed with
- *   mask0, should match byte0.
- * @mask0: The mask used to check the first byte of the report.
- * @flags: Additional device capabilities (passthrough port, trackstick, etc.).
+ * @protocol_info: information about the protocol used by the device.
  *
  * Many (but not all) ALPS touchpads can be identified by looking at the
  * values returned in the "E7 report" and/or the "EC report."  This table
  * lists a number of such touchpads.
  */
 struct alps_model_info {
-       unsigned char signature[3];
-       unsigned char command_mode_resp;
-       unsigned char proto_version;
-       unsigned char byte0, mask0;
-       int flags;
+       u8 signature[3];
+       u8 command_mode_resp;
+       struct alps_protocol_info protocol_info;
 };
 
 /**
@@ -132,8 +141,12 @@ struct alps_fields {
 
 /**
  * struct alps_data - private data structure for the ALPS driver
- * @dev2: "Relative" device used to report trackstick or mouse activity.
- * @phys: Physical path for the relative device.
+ * @psmouse: Pointer to parent psmouse device
+ * @dev2: Trackstick device (can be NULL).
+ * @dev3: Generic PS/2 mouse (can be NULL, delayed registering).
+ * @phys2: Physical path for the trackstick device.
+ * @phys3: Physical path for the generic PS/2 mouse.
+ * @dev3_register_work: Delayed work for registering PS/2 mouse.
  * @nibble_commands: Command mapping used for touchpad register accesses.
  * @addr_command: Command used to tell the touchpad that a register address
  *   follows.
@@ -160,15 +173,19 @@ struct alps_fields {
  * @timer: Timer for flushing out the final report packet in the stream.
  */
 struct alps_data {
+       struct psmouse *psmouse;
        struct input_dev *dev2;
-       char phys[32];
+       struct input_dev *dev3;
+       char phys2[32];
+       char phys3[32];
+       struct delayed_work dev3_register_work;
 
        /* these are autodetected when the device is identified */
        const struct alps_nibble_commands *nibble_commands;
        int addr_command;
-       unsigned char proto_version;
-       unsigned char byte0, mask0;
-       unsigned char fw_ver[3];
+       u16 proto_version;
+       u8 byte0, mask0;
+       u8 fw_ver[3];
        int flags;
        int x_max;
        int y_max;
index 9118a1861a45cf0629f6380112eafeb2cc9b13b9..28dcfc822bf647f4386239d49487e111020a2272 100644 (file)
@@ -710,8 +710,3 @@ err_exit:
 
        return -1;
 }
-
-bool cypress_supported(void)
-{
-       return true;
-}
index 4720f21d2d70cfe2dbf20a626a5133010ba3e320..81f68aaed7c8567d3cfbfb4afc56008815ead34d 100644 (file)
@@ -172,7 +172,6 @@ struct cytp_data {
 #ifdef CONFIG_MOUSE_PS2_CYPRESS
 int cypress_detect(struct psmouse *psmouse, bool set_properties);
 int cypress_init(struct psmouse *psmouse);
-bool cypress_supported(void);
 #else
 inline int cypress_detect(struct psmouse *psmouse, bool set_properties)
 {
@@ -182,10 +181,6 @@ inline int cypress_init(struct psmouse *psmouse)
 {
        return -ENOSYS;
 }
-inline bool cypress_supported(void)
-{
-       return 0;
-}
 #endif /* CONFIG_MOUSE_PS2_CYPRESS */
 
 #endif  /* _CYPRESS_PS2_H */
index fca38ba63bbe7f73f6e01195bd2e20395d683dd5..757f78a94aeccb1be6b80819f75752a09e705bf6 100644 (file)
@@ -424,11 +424,6 @@ fail:
        return error;
 }
 
-bool focaltech_supported(void)
-{
-       return true;
-}
-
 #else /* CONFIG_MOUSE_PS2_FOCALTECH */
 
 int focaltech_init(struct psmouse *psmouse)
@@ -438,9 +433,4 @@ int focaltech_init(struct psmouse *psmouse)
        return 0;
 }
 
-bool focaltech_supported(void)
-{
-       return false;
-}
-
 #endif /* CONFIG_MOUSE_PS2_FOCALTECH */
index 71870a9b548a8cd0a695fb9327b88e75daa83eb8..ca61ebff373e99a194011c1d8554d8ffb0766981 100644 (file)
@@ -19,6 +19,5 @@
 
 int focaltech_detect(struct psmouse *psmouse, bool set_properties);
 int focaltech_init(struct psmouse *psmouse);
-bool focaltech_supported(void);
 
 #endif
index 68469feda470d9d8b34c249cbcd02426795f94c8..4ccd01d7a48de9639a637db4a757c2c09c6c6836 100644 (file)
@@ -727,7 +727,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
        if (psmouse_do_detect(focaltech_detect, psmouse, set_properties) == 0) {
                if (max_proto > PSMOUSE_IMEX) {
                        if (!set_properties || focaltech_init(psmouse) == 0) {
-                               if (focaltech_supported())
+                               if (IS_ENABLED(CONFIG_MOUSE_PS2_FOCALTECH))
                                        return PSMOUSE_FOCALTECH;
                                /*
                                 * Note that we need to also restrict
@@ -776,7 +776,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
  * Try activating protocol, but check if support is enabled first, since
  * we try detecting Synaptics even when protocol is disabled.
  */
-                       if (synaptics_supported() &&
+                       if (IS_ENABLED(CONFIG_MOUSE_PS2_SYNAPTICS) &&
                            (!set_properties || synaptics_init(psmouse) == 0)) {
                                return PSMOUSE_SYNAPTICS;
                        }
@@ -801,7 +801,7 @@ static int psmouse_extensions(struct psmouse *psmouse,
  */
        if (max_proto > PSMOUSE_IMEX &&
                        cypress_detect(psmouse, set_properties) == 0) {
-               if (cypress_supported()) {
+               if (IS_ENABLED(CONFIG_MOUSE_PS2_CYPRESS)) {
                        if (cypress_init(psmouse) == 0)
                                return PSMOUSE_CYPRESS;
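
The three hunks above drop the cypress_supported()/synaptics_supported()/focaltech_supported() stubs in favour of IS_ENABLED(). IS_ENABLED(), defined in <linux/kconfig.h>, expands to 1 when the given Kconfig option is built in or modular and to 0 otherwise, so it can sit in an ordinary C conditional. A minimal sketch with a placeholder option name (CONFIG_EXAMPLE_FEATURE is not a real option):

        #include <linux/errno.h>
        #include <linux/kconfig.h>

        static int example_probe(void)
        {
                /* Compile-time constant: no runtime helper function is needed. */
                if (!IS_ENABLED(CONFIG_EXAMPLE_FEATURE))
                        return -ENOSYS;            /* support compiled out */

                /* ... real detection would go here ... */
                return 0;
        }

Because the value is a compile-time constant, the compiler discards the dead branch, which is why the separate *_supported() helpers removed above are no longer needed.
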
 
index 7e705ee90b86cf0c86729aacbf6c7a1d981b0a12..f2cceb6493a0aea304c838043735ac3889041438 100644 (file)
@@ -1454,11 +1454,6 @@ int synaptics_init_relative(struct psmouse *psmouse)
        return __synaptics_init(psmouse, false);
 }
 
-bool synaptics_supported(void)
-{
-       return true;
-}
-
 #else /* CONFIG_MOUSE_PS2_SYNAPTICS */
 
 void __init synaptics_module_init(void)
@@ -1470,9 +1465,4 @@ int synaptics_init(struct psmouse *psmouse)
        return -ENOSYS;
 }
 
-bool synaptics_supported(void)
-{
-       return false;
-}
-
 #endif /* CONFIG_MOUSE_PS2_SYNAPTICS */
index 6faf9bb7c117d46f40af90a9232fc2810c921155..aedc3299b14e2b753c1e9d51da95b0953ea081cb 100644 (file)
@@ -175,6 +175,5 @@ int synaptics_detect(struct psmouse *psmouse, bool set_properties);
 int synaptics_init(struct psmouse *psmouse);
 int synaptics_init_relative(struct psmouse *psmouse);
 void synaptics_reset(struct psmouse *psmouse);
-bool synaptics_supported(void);
 
 #endif /* _SYNAPTICS_H */
index 1daa7ca04577de0854f77b1ff4d31faf241e0baa..9acdc080e7ecd21b2cd256c18e31c9c27337c7d1 100644 (file)
@@ -192,14 +192,6 @@ static bool gic_local_irq_is_routable(int intr)
        }
 }
 
-unsigned int gic_get_timer_pending(void)
-{
-       unsigned int vpe_pending;
-
-       vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
-       return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
-}
-
 static void gic_bind_eic_interrupt(int irq, int set)
 {
        /* Convert irq vector # to hw int # */
index b8611e3e5e7451501fb1df22b27ef252c99dee21..09df54fc1fef2162bf06228dddc431f2f7a9feeb 100644 (file)
@@ -24,7 +24,7 @@ config MISDN_HFCMULTI
           * HFC-E1 (E1 interface for 2Mbit ISDN)
 
 config MISDN_HFCMULTI_8xx
-       boolean "Support for XHFC embedded board in HFC multiport driver"
+       bool "Support for XHFC embedded board in HFC multiport driver"
        depends on MISDN
        depends on MISDN_HFCMULTI
        depends on 8xx
index c39644478aa4e660f0ec2ddefedea4efbfd776b2..63e05e32b46269e29f8e75e03073d5587fd6d916 100644 (file)
@@ -178,7 +178,7 @@ config MD_FAULTY
 source "drivers/md/bcache/Kconfig"
 
 config BLK_DEV_DM_BUILTIN
-       boolean
+       bool
 
 config BLK_DEV_DM
        tristate "Device mapper support"
@@ -197,7 +197,7 @@ config BLK_DEV_DM
          If unsure, say N.
 
 config DM_DEBUG
-       boolean "Device mapper debugging support"
+       bool "Device mapper debugging support"
        depends on BLK_DEV_DM
        ---help---
          Enable this for messages that may help debug device-mapper problems.
index 08981be7baa183dbe963b6e38cd4866f34e278a7..713a96237a80c34951302dfa4a5ea6db9f44c39b 100644 (file)
 #include <linux/slab.h>
 #include <linux/crypto.h>
 #include <linux/workqueue.h>
+#include <linux/kthread.h>
 #include <linux/backing-dev.h>
 #include <linux/atomic.h>
 #include <linux/scatterlist.h>
+#include <linux/rbtree.h>
 #include <asm/page.h>
 #include <asm/unaligned.h>
 #include <crypto/hash.h>
@@ -58,7 +60,8 @@ struct dm_crypt_io {
        atomic_t io_pending;
        int error;
        sector_t sector;
-       struct dm_crypt_io *base_io;
+
+       struct rb_node rb_node;
 } CRYPTO_MINALIGN_ATTR;
 
 struct dm_crypt_request {
@@ -108,7 +111,8 @@ struct iv_tcw_private {
  * Crypt: maps a linear range of a block device
  * and encrypts / decrypts at the same time.
  */
-enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID };
+enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
+            DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
 
 /*
  * The fields in here must be read only after initialization.
@@ -121,14 +125,18 @@ struct crypt_config {
         * pool for per bio private data, crypto requests and
         * encryption requeusts/buffer pages
         */
-       mempool_t *io_pool;
        mempool_t *req_pool;
        mempool_t *page_pool;
        struct bio_set *bs;
+       struct mutex bio_alloc_lock;
 
        struct workqueue_struct *io_queue;
        struct workqueue_struct *crypt_queue;
 
+       struct task_struct *write_thread;
+       wait_queue_head_t write_thread_wait;
+       struct rb_root write_tree;
+
        char *cipher;
        char *cipher_string;
 
@@ -172,9 +180,6 @@ struct crypt_config {
 };
 
 #define MIN_IOS        16
-#define MIN_POOL_PAGES 32
-
-static struct kmem_cache *_crypt_io_pool;
 
 static void clone_init(struct dm_crypt_io *, struct bio *);
 static void kcryptd_queue_crypt(struct dm_crypt_io *io);
@@ -946,57 +951,70 @@ static int crypt_convert(struct crypt_config *cc,
        return 0;
 }
 
+static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone);
+
 /*
  * Generate a new unfragmented bio with the given size
  * This should never violate the device limitations
- * May return a smaller bio when running out of pages, indicated by
- * *out_of_pages set to 1.
+ *
+ * This function may be called concurrently. If we allocate from the mempool
+ * concurrently, there is a possibility of deadlock. For example, if we have a
+ * mempool of 256 pages and two processes, each wanting 256 pages, allocate from
+ * the mempool concurrently, they may deadlock in a situation where both processes
+ * have allocated 128 pages and the mempool is exhausted.
+ *
+ * In order to avoid this scenario we allocate the pages under a mutex.
+ *
+ * In order not to degrade performance with excessive locking, we try
+ * non-blocking allocations without a mutex first, but on failure we fall back
+ * to blocking allocations with a mutex.
  */
-static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
-                                     unsigned *out_of_pages)
+static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size)
 {
        struct crypt_config *cc = io->cc;
        struct bio *clone;
        unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       gfp_t gfp_mask = GFP_NOIO | __GFP_HIGHMEM;
-       unsigned i, len;
+       gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
+       unsigned i, len, remaining_size;
        struct page *page;
+       struct bio_vec *bvec;
+
+retry:
+       if (unlikely(gfp_mask & __GFP_WAIT))
+               mutex_lock(&cc->bio_alloc_lock);
 
        clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
        if (!clone)
-               return NULL;
+               goto return_clone;
 
        clone_init(io, clone);
-       *out_of_pages = 0;
+
+       remaining_size = size;
 
        for (i = 0; i < nr_iovecs; i++) {
                page = mempool_alloc(cc->page_pool, gfp_mask);
                if (!page) {
-                       *out_of_pages = 1;
-                       break;
+                       crypt_free_buffer_pages(cc, clone);
+                       bio_put(clone);
+                       gfp_mask |= __GFP_WAIT;
+                       goto retry;
                }
 
-               /*
-                * If additional pages cannot be allocated without waiting,
-                * return a partially-allocated bio.  The caller will then try
-                * to allocate more bios while submitting this partial bio.
-                */
-               gfp_mask = (gfp_mask | __GFP_NOWARN) & ~__GFP_WAIT;
+               len = (remaining_size > PAGE_SIZE) ? PAGE_SIZE : remaining_size;
 
-               len = (size > PAGE_SIZE) ? PAGE_SIZE : size;
+               bvec = &clone->bi_io_vec[clone->bi_vcnt++];
+               bvec->bv_page = page;
+               bvec->bv_len = len;
+               bvec->bv_offset = 0;
 
-               if (!bio_add_page(clone, page, len, 0)) {
-                       mempool_free(page, cc->page_pool);
-                       break;
-               }
+               clone->bi_iter.bi_size += len;
 
-               size -= len;
+               remaining_size -= len;
        }
 
-       if (!clone->bi_iter.bi_size) {
-               bio_put(clone);
-               return NULL;
-       }
+return_clone:
+       if (unlikely(gfp_mask & __GFP_WAIT))
+               mutex_unlock(&cc->bio_alloc_lock);
 
        return clone;
 }
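
The comment and retry logic in this hunk boil down to a general pattern: try the whole multi-page allocation with GFP_NOWAIT first, and only if that fails repeat it with __GFP_WAIT while holding a mutex, so at most one caller at a time can drain the shared mempool. A stripped-down sketch of the pattern, where struct example_pool and example_alloc_pages() are hypothetical stand-ins for the bio and page loop above:

        #include <linux/gfp.h>
        #include <linux/mutex.h>

        struct example_pool {
                struct mutex lock;
                /* ... backing storage ... */
        };

        /* Hypothetical helper: allocates nr_pages from the pool, or returns
         * NULL if the allocation cannot be satisfied with the given flags. */
        void *example_alloc_pages(struct example_pool *pool,
                                  unsigned int nr_pages, gfp_t gfp);

        static void *alloc_all_or_retry(struct example_pool *pool,
                                        unsigned int nr_pages)
        {
                gfp_t gfp = GFP_NOWAIT | __GFP_HIGHMEM;
                void *buf;

        retry:
                if (gfp & __GFP_WAIT)
                        mutex_lock(&pool->lock);   /* serialize blocking callers */

                buf = example_alloc_pages(pool, nr_pages, gfp);
                if (!buf && !(gfp & __GFP_WAIT)) {
                        gfp |= __GFP_WAIT;         /* fall back to blocking mode */
                        goto retry;
                }

                if (gfp & __GFP_WAIT)
                        mutex_unlock(&pool->lock);

                return buf;
        }

Serializing the blocking path means a single caller can always drain and refill the mempool without a second caller holding pages it needs.
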
@@ -1020,7 +1038,6 @@ static void crypt_io_init(struct dm_crypt_io *io, struct crypt_config *cc,
        io->base_bio = bio;
        io->sector = sector;
        io->error = 0;
-       io->base_io = NULL;
        io->ctx.req = NULL;
        atomic_set(&io->io_pending, 0);
 }
@@ -1033,13 +1050,11 @@ static void crypt_inc_pending(struct dm_crypt_io *io)
 /*
  * One of the bios was finished. Check for completion of
  * the whole request and correctly clean up the buffer.
- * If base_io is set, wait for the last fragment to complete.
  */
 static void crypt_dec_pending(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
        struct bio *base_bio = io->base_bio;
-       struct dm_crypt_io *base_io = io->base_io;
        int error = io->error;
 
        if (!atomic_dec_and_test(&io->io_pending))
@@ -1047,16 +1062,8 @@ static void crypt_dec_pending(struct dm_crypt_io *io)
 
        if (io->ctx.req)
                crypt_free_req(cc, io->ctx.req, base_bio);
-       if (io != dm_per_bio_data(base_bio, cc->per_bio_data_size))
-               mempool_free(io, cc->io_pool);
-
-       if (likely(!base_io))
-               bio_endio(base_bio, error);
-       else {
-               if (error && !base_io->error)
-                       base_io->error = error;
-               crypt_dec_pending(base_io);
-       }
+
+       bio_endio(base_bio, error);
 }
 
 /*
@@ -1138,37 +1145,97 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
        return 0;
 }
 
+static void kcryptd_io_read_work(struct work_struct *work)
+{
+       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+
+       crypt_inc_pending(io);
+       if (kcryptd_io_read(io, GFP_NOIO))
+               io->error = -ENOMEM;
+       crypt_dec_pending(io);
+}
+
+static void kcryptd_queue_read(struct dm_crypt_io *io)
+{
+       struct crypt_config *cc = io->cc;
+
+       INIT_WORK(&io->work, kcryptd_io_read_work);
+       queue_work(cc->io_queue, &io->work);
+}
+
 static void kcryptd_io_write(struct dm_crypt_io *io)
 {
        struct bio *clone = io->ctx.bio_out;
+
        generic_make_request(clone);
 }
 
-static void kcryptd_io(struct work_struct *work)
+#define crypt_io_from_node(node) rb_entry((node), struct dm_crypt_io, rb_node)
+
+static int dmcrypt_write(void *data)
 {
-       struct dm_crypt_io *io = container_of(work, struct dm_crypt_io, work);
+       struct crypt_config *cc = data;
+       struct dm_crypt_io *io;
 
-       if (bio_data_dir(io->base_bio) == READ) {
-               crypt_inc_pending(io);
-               if (kcryptd_io_read(io, GFP_NOIO))
-                       io->error = -ENOMEM;
-               crypt_dec_pending(io);
-       } else
-               kcryptd_io_write(io);
-}
+       while (1) {
+               struct rb_root write_tree;
+               struct blk_plug plug;
 
-static void kcryptd_queue_io(struct dm_crypt_io *io)
-{
-       struct crypt_config *cc = io->cc;
+               DECLARE_WAITQUEUE(wait, current);
 
-       INIT_WORK(&io->work, kcryptd_io);
-       queue_work(cc->io_queue, &io->work);
+               spin_lock_irq(&cc->write_thread_wait.lock);
+continue_locked:
+
+               if (!RB_EMPTY_ROOT(&cc->write_tree))
+                       goto pop_from_list;
+
+               __set_current_state(TASK_INTERRUPTIBLE);
+               __add_wait_queue(&cc->write_thread_wait, &wait);
+
+               spin_unlock_irq(&cc->write_thread_wait.lock);
+
+               if (unlikely(kthread_should_stop())) {
+                       set_task_state(current, TASK_RUNNING);
+                       remove_wait_queue(&cc->write_thread_wait, &wait);
+                       break;
+               }
+
+               schedule();
+
+               set_task_state(current, TASK_RUNNING);
+               spin_lock_irq(&cc->write_thread_wait.lock);
+               __remove_wait_queue(&cc->write_thread_wait, &wait);
+               goto continue_locked;
+
+pop_from_list:
+               write_tree = cc->write_tree;
+               cc->write_tree = RB_ROOT;
+               spin_unlock_irq(&cc->write_thread_wait.lock);
+
+               BUG_ON(rb_parent(write_tree.rb_node));
+
+               /*
+                * Note: we cannot walk the tree here with rb_next because
+                * the structures may be freed when kcryptd_io_write is called.
+                */
+               blk_start_plug(&plug);
+               do {
+                       io = crypt_io_from_node(rb_first(&write_tree));
+                       rb_erase(&io->rb_node, &write_tree);
+                       kcryptd_io_write(io);
+               } while (!RB_EMPTY_ROOT(&write_tree));
+               blk_finish_plug(&plug);
+       }
+       return 0;
 }
 
 static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 {
        struct bio *clone = io->ctx.bio_out;
        struct crypt_config *cc = io->cc;
+       unsigned long flags;
+       sector_t sector;
+       struct rb_node **rbp, *parent;
 
        if (unlikely(io->error < 0)) {
                crypt_free_buffer_pages(cc, clone);
@@ -1182,20 +1249,34 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 
        clone->bi_iter.bi_sector = cc->start + io->sector;
 
-       if (async)
-               kcryptd_queue_io(io);
-       else
+       if (likely(!async) && test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags)) {
                generic_make_request(clone);
+               return;
+       }
+
+       spin_lock_irqsave(&cc->write_thread_wait.lock, flags);
+       rbp = &cc->write_tree.rb_node;
+       parent = NULL;
+       sector = io->sector;
+       while (*rbp) {
+               parent = *rbp;
+               if (sector < crypt_io_from_node(parent)->sector)
+                       rbp = &(*rbp)->rb_left;
+               else
+                       rbp = &(*rbp)->rb_right;
+       }
+       rb_link_node(&io->rb_node, parent, rbp);
+       rb_insert_color(&io->rb_node, &cc->write_tree);
+
+       wake_up_locked(&cc->write_thread_wait);
+       spin_unlock_irqrestore(&cc->write_thread_wait.lock, flags);
 }
 
 static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 {
        struct crypt_config *cc = io->cc;
        struct bio *clone;
-       struct dm_crypt_io *new_io;
        int crypt_finished;
-       unsigned out_of_pages = 0;
-       unsigned remaining = io->base_bio->bi_iter.bi_size;
        sector_t sector = io->sector;
        int r;
 
@@ -1205,80 +1286,30 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
        crypt_inc_pending(io);
        crypt_convert_init(cc, &io->ctx, NULL, io->base_bio, sector);
 
-       /*
-        * The allocated buffers can be smaller than the whole bio,
-        * so repeat the whole process until all the data can be handled.
-        */
-       while (remaining) {
-               clone = crypt_alloc_buffer(io, remaining, &out_of_pages);
-               if (unlikely(!clone)) {
-                       io->error = -ENOMEM;
-                       break;
-               }
-
-               io->ctx.bio_out = clone;
-               io->ctx.iter_out = clone->bi_iter;
-
-               remaining -= clone->bi_iter.bi_size;
-               sector += bio_sectors(clone);
-
-               crypt_inc_pending(io);
-
-               r = crypt_convert(cc, &io->ctx);
-               if (r < 0)
-                       io->error = -EIO;
-
-               crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
-
-               /* Encryption was already finished, submit io now */
-               if (crypt_finished) {
-                       kcryptd_crypt_write_io_submit(io, 0);
-
-                       /*
-                        * If there was an error, do not try next fragments.
-                        * For async, error is processed in async handler.
-                        */
-                       if (unlikely(r < 0))
-                               break;
+       clone = crypt_alloc_buffer(io, io->base_bio->bi_iter.bi_size);
+       if (unlikely(!clone)) {
+               io->error = -EIO;
+               goto dec;
+       }
 
-                       io->sector = sector;
-               }
+       io->ctx.bio_out = clone;
+       io->ctx.iter_out = clone->bi_iter;
 
-               /*
-                * Out of memory -> run queues
-                * But don't wait if split was due to the io size restriction
-                */
-               if (unlikely(out_of_pages))
-                       congestion_wait(BLK_RW_ASYNC, HZ/100);
+       sector += bio_sectors(clone);
 
-               /*
-                * With async crypto it is unsafe to share the crypto context
-                * between fragments, so switch to a new dm_crypt_io structure.
-                */
-               if (unlikely(!crypt_finished && remaining)) {
-                       new_io = mempool_alloc(cc->io_pool, GFP_NOIO);
-                       crypt_io_init(new_io, io->cc, io->base_bio, sector);
-                       crypt_inc_pending(new_io);
-                       crypt_convert_init(cc, &new_io->ctx, NULL,
-                                          io->base_bio, sector);
-                       new_io->ctx.iter_in = io->ctx.iter_in;
-
-                       /*
-                        * Fragments after the first use the base_io
-                        * pending count.
-                        */
-                       if (!io->base_io)
-                               new_io->base_io = io;
-                       else {
-                               new_io->base_io = io->base_io;
-                               crypt_inc_pending(io->base_io);
-                               crypt_dec_pending(io);
-                       }
+       crypt_inc_pending(io);
+       r = crypt_convert(cc, &io->ctx);
+       if (r)
+               io->error = -EIO;
+       crypt_finished = atomic_dec_and_test(&io->ctx.cc_pending);
 
-                       io = new_io;
-               }
+       /* Encryption was already finished, submit io now */
+       if (crypt_finished) {
+               kcryptd_crypt_write_io_submit(io, 0);
+               io->sector = sector;
        }
 
+dec:
        crypt_dec_pending(io);
 }
 
@@ -1481,6 +1512,9 @@ static void crypt_dtr(struct dm_target *ti)
        if (!cc)
                return;
 
+       if (cc->write_thread)
+               kthread_stop(cc->write_thread);
+
        if (cc->io_queue)
                destroy_workqueue(cc->io_queue);
        if (cc->crypt_queue)
@@ -1495,8 +1529,6 @@ static void crypt_dtr(struct dm_target *ti)
                mempool_destroy(cc->page_pool);
        if (cc->req_pool)
                mempool_destroy(cc->req_pool);
-       if (cc->io_pool)
-               mempool_destroy(cc->io_pool);
 
        if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
                cc->iv_gen_ops->dtr(cc);
@@ -1688,7 +1720,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        char dummy;
 
        static struct dm_arg _args[] = {
-               {0, 1, "Invalid number of feature args"},
+               {0, 3, "Invalid number of feature args"},
        };
 
        if (argc < 5) {
@@ -1710,13 +1742,6 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        if (ret < 0)
                goto bad;
 
-       ret = -ENOMEM;
-       cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool);
-       if (!cc->io_pool) {
-               ti->error = "Cannot allocate crypt io mempool";
-               goto bad;
-       }
-
        cc->dmreq_start = sizeof(struct ablkcipher_request);
        cc->dmreq_start += crypto_ablkcipher_reqsize(any_tfm(cc));
        cc->dmreq_start = ALIGN(cc->dmreq_start, __alignof__(struct dm_crypt_request));
@@ -1734,6 +1759,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                iv_size_padding = crypto_ablkcipher_alignmask(any_tfm(cc));
        }
 
+       ret = -ENOMEM;
        cc->req_pool = mempool_create_kmalloc_pool(MIN_IOS, cc->dmreq_start +
                        sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size);
        if (!cc->req_pool) {
@@ -1746,7 +1772,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                      sizeof(struct dm_crypt_request) + iv_size_padding + cc->iv_size,
                      ARCH_KMALLOC_MINALIGN);
 
-       cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0);
+       cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
        if (!cc->page_pool) {
                ti->error = "Cannot allocate page mempool";
                goto bad;
@@ -1758,6 +1784,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
        }
 
+       mutex_init(&cc->bio_alloc_lock);
+
        ret = -EINVAL;
        if (sscanf(argv[2], "%llu%c", &tmpll, &dummy) != 1) {
                ti->error = "Invalid iv_offset sector";
@@ -1788,15 +1816,26 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                if (ret)
                        goto bad;
 
-               opt_string = dm_shift_arg(&as);
+               while (opt_params--) {
+                       opt_string = dm_shift_arg(&as);
+                       if (!opt_string) {
+                               ti->error = "Not enough feature arguments";
+                               goto bad;
+                       }
 
-               if (opt_params == 1 && opt_string &&
-                   !strcasecmp(opt_string, "allow_discards"))
-                       ti->num_discard_bios = 1;
-               else if (opt_params) {
-                       ret = -EINVAL;
-                       ti->error = "Invalid feature arguments";
-                       goto bad;
+                       if (!strcasecmp(opt_string, "allow_discards"))
+                               ti->num_discard_bios = 1;
+
+                       else if (!strcasecmp(opt_string, "same_cpu_crypt"))
+                               set_bit(DM_CRYPT_SAME_CPU, &cc->flags);
+
+                       else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
+                               set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+
+                       else {
+                               ti->error = "Invalid feature arguments";
+                               goto bad;
+                       }
                }
        }
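
With this loop the crypt target accepts up to three optional feature arguments rather than one. For illustration only, a table line enabling all of them could look roughly like the sketch below; the device path, length and key are placeholders, and the trailing count of 3 matches the number of feature keywords expected by the parser above:

        dmsetup create cryptdev --table "0 409600 crypt aes-xts-plain64 <key> 0 /dev/sdX 0 3 allow_discards same_cpu_crypt submit_from_crypt_cpus"
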
 
@@ -1807,13 +1846,28 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
                goto bad;
        }
 
-       cc->crypt_queue = alloc_workqueue("kcryptd",
-                                         WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+       if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
+               cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM, 1);
+       else
+               cc->crypt_queue = alloc_workqueue("kcryptd", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+                                                 num_online_cpus());
        if (!cc->crypt_queue) {
                ti->error = "Couldn't create kcryptd queue";
                goto bad;
        }
 
+       init_waitqueue_head(&cc->write_thread_wait);
+       cc->write_tree = RB_ROOT;
+
+       cc->write_thread = kthread_create(dmcrypt_write, cc, "dmcrypt_write");
+       if (IS_ERR(cc->write_thread)) {
+               ret = PTR_ERR(cc->write_thread);
+               cc->write_thread = NULL;
+               ti->error = "Couldn't spawn write thread";
+               goto bad;
+       }
+       wake_up_process(cc->write_thread);
+
        ti->num_flush_bios = 1;
        ti->discard_zeroes_data_unsupported = true;
 
@@ -1848,7 +1902,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 
        if (bio_data_dir(io->base_bio) == READ) {
                if (kcryptd_io_read(io, GFP_NOWAIT))
-                       kcryptd_queue_io(io);
+                       kcryptd_queue_read(io);
        } else
                kcryptd_queue_crypt(io);
 
@@ -1860,6 +1914,7 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
 {
        struct crypt_config *cc = ti->private;
        unsigned i, sz = 0;
+       int num_feature_args = 0;
 
        switch (type) {
        case STATUSTYPE_INFO:
@@ -1878,8 +1933,18 @@ static void crypt_status(struct dm_target *ti, status_type_t type,
                DMEMIT(" %llu %s %llu", (unsigned long long)cc->iv_offset,
                                cc->dev->name, (unsigned long long)cc->start);
 
-               if (ti->num_discard_bios)
-                       DMEMIT(" 1 allow_discards");
+               num_feature_args += !!ti->num_discard_bios;
+               num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
+               num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+               if (num_feature_args) {
+                       DMEMIT(" %d", num_feature_args);
+                       if (ti->num_discard_bios)
+                               DMEMIT(" allow_discards");
+                       if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
+                               DMEMIT(" same_cpu_crypt");
+                       if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
+                               DMEMIT(" submit_from_crypt_cpus");
+               }
 
                break;
        }
@@ -1976,7 +2041,7 @@ static int crypt_iterate_devices(struct dm_target *ti,
 
 static struct target_type crypt_target = {
        .name   = "crypt",
-       .version = {1, 13, 0},
+       .version = {1, 14, 0},
        .module = THIS_MODULE,
        .ctr    = crypt_ctr,
        .dtr    = crypt_dtr,
@@ -1994,15 +2059,9 @@ static int __init dm_crypt_init(void)
 {
        int r;
 
-       _crypt_io_pool = KMEM_CACHE(dm_crypt_io, 0);
-       if (!_crypt_io_pool)
-               return -ENOMEM;
-
        r = dm_register_target(&crypt_target);
-       if (r < 0) {
+       if (r < 0)
                DMERR("register failed %d", r);
-               kmem_cache_destroy(_crypt_io_pool);
-       }
 
        return r;
 }
@@ -2010,7 +2069,6 @@ static int __init dm_crypt_init(void)
 static void __exit dm_crypt_exit(void)
 {
        dm_unregister_target(&crypt_target);
-       kmem_cache_destroy(_crypt_io_pool);
 }
 
 module_init(dm_crypt_init);
index c09359db3a90730dbd32b3bd733709f3c6444192..37de0173b6d2324ed15de95442df37bc5a990e16 100644 (file)
@@ -290,6 +290,12 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
        unsigned short logical_block_size = queue_logical_block_size(q);
        sector_t num_sectors;
 
+       /* Reject unsupported discard requests */
+       if ((rw & REQ_DISCARD) && !blk_queue_discard(q)) {
+               dec_count(io, region, -EOPNOTSUPP);
+               return;
+       }
+
        /*
         * where->count may be zero if rw holds a flush and we need to
         * send a zero-sized flush.
index 7dfdb5c746d6f31960902350c33b7457485caadb..089d62751f7ff2a3aedf7e441cb88bec0d06b8a7 100644 (file)
@@ -604,6 +604,15 @@ static void write_callback(unsigned long error, void *context)
                return;
        }
 
+       /*
+        * If the bio is discard, return an error, but do not
+        * degrade the array.
+        */
+       if (bio->bi_rw & REQ_DISCARD) {
+               bio_endio(bio, -EOPNOTSUPP);
+               return;
+       }
+
        for (i = 0; i < ms->nr_mirrors; i++)
                if (test_bit(i, &error))
                        fail_mirror(ms->mirror + i, DM_RAID1_WRITE_ERROR);
index 864b03f477276f9c01bee8fd34a25e2303af1116..8b204ae216ab62d354c814277dc413c34f0bf9a4 100644 (file)
@@ -1432,8 +1432,6 @@ out:
                full_bio->bi_private = pe->full_bio_private;
                atomic_inc(&full_bio->bi_remaining);
        }
-       free_pending_exception(pe);
-
        increment_pending_exceptions_done_count();
 
        up_write(&s->lock);
@@ -1450,6 +1448,8 @@ out:
        }
 
        retry_origin_bios(s, origin_bios);
+
+       free_pending_exception(pe);
 }
 
 static void commit_callback(void *context, int success)
index ec1444f49de14ac185ae39cfb214deee3ba66998..73f28802dc7abc3cb46dc38c8ef6fb5bb521e66b 100644 (file)
@@ -2571,7 +2571,7 @@ int dm_setup_md_queue(struct mapped_device *md)
        return 0;
 }
 
-static struct mapped_device *dm_find_md(dev_t dev)
+struct mapped_device *dm_get_md(dev_t dev)
 {
        struct mapped_device *md;
        unsigned minor = MINOR(dev);
@@ -2582,12 +2582,15 @@ static struct mapped_device *dm_find_md(dev_t dev)
        spin_lock(&_minor_lock);
 
        md = idr_find(&_minor_idr, minor);
-       if (md && (md == MINOR_ALLOCED ||
-                  (MINOR(disk_devt(dm_disk(md))) != minor) ||
-                  dm_deleting_md(md) ||
-                  test_bit(DMF_FREEING, &md->flags))) {
-               md = NULL;
-               goto out;
+       if (md) {
+               if ((md == MINOR_ALLOCED ||
+                    (MINOR(disk_devt(dm_disk(md))) != minor) ||
+                    dm_deleting_md(md) ||
+                    test_bit(DMF_FREEING, &md->flags))) {
+                       md = NULL;
+                       goto out;
+               }
+               dm_get(md);
        }
 
 out:
@@ -2595,16 +2598,6 @@ out:
 
        return md;
 }
-
-struct mapped_device *dm_get_md(dev_t dev)
-{
-       struct mapped_device *md = dm_find_md(dev);
-
-       if (md)
-               dm_get(md);
-
-       return md;
-}
 EXPORT_SYMBOL_GPL(dm_get_md);
 
 void *dm_get_mdptr(struct mapped_device *md)
index 0c2dec7aec20fd798d45b24d92156b3f85f4aae0..78c74bb71ba42f11ff5035a5a593ed5732df9791 100644 (file)
@@ -8,7 +8,7 @@ config DM_PERSISTENT_DATA
         device-mapper targets such as the thin provisioning target.
 
 config DM_DEBUG_BLOCK_STACK_TRACING
-       boolean "Keep stack trace of persistent data block lock holders"
+       bool "Keep stack trace of persistent data block lock holders"
        depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
        select STACKTRACE
        ---help---
index cfbf9617e4658bd6aa84f45c840bda6f56a8402a..ebb280a14325e1d937986926b40592c3b1847168 100644 (file)
@@ -78,7 +78,9 @@ static int sm_disk_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b,
        if (r)
                return r;
 
-       return count > 1;
+       *result = count > 1;
+
+       return 0;
 }
 
 static int sm_disk_set_count(struct dm_space_map *sm, dm_block_t b,
index 6af0a28ba37dd6fe54cc44a4164734597b53886d..e8a4218b57267f508eb871f216de112e93707d31 100644 (file)
@@ -21,8 +21,6 @@
 #include <linux/err.h>
 
 #include <linux/clk.h>
-#include <linux/clk/sunxi.h>
-
 #include <linux/gpio.h>
 #include <linux/platform_device.h>
 #include <linux/spinlock.h>
@@ -229,6 +227,8 @@ struct sunxi_mmc_host {
        /* clock management */
        struct clk      *clk_ahb;
        struct clk      *clk_mmc;
+       struct clk      *clk_sample;
+       struct clk      *clk_output;
 
        /* irq */
        spinlock_t      lock;
@@ -653,26 +653,31 @@ static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host,
 
        /* determine delays */
        if (rate <= 400000) {
-               oclk_dly = 0;
-               sclk_dly = 7;
+               oclk_dly = 180;
+               sclk_dly = 42;
        } else if (rate <= 25000000) {
-               oclk_dly = 0;
-               sclk_dly = 5;
+               oclk_dly = 180;
+               sclk_dly = 75;
        } else if (rate <= 50000000) {
                if (ios->timing == MMC_TIMING_UHS_DDR50) {
-                       oclk_dly = 2;
-                       sclk_dly = 4;
+                       oclk_dly = 60;
+                       sclk_dly = 120;
                } else {
-                       oclk_dly = 3;
-                       sclk_dly = 5;
+                       oclk_dly = 90;
+                       sclk_dly = 150;
                }
+       } else if (rate <= 100000000) {
+               oclk_dly = 6;
+               sclk_dly = 24;
+       } else if (rate <= 200000000) {
+               oclk_dly = 3;
+               sclk_dly = 12;
        } else {
-               /* rate > 50000000 */
-               oclk_dly = 2;
-               sclk_dly = 4;
+               return -EINVAL;
        }
 
-       clk_sunxi_mmc_phase_control(host->clk_mmc, sclk_dly, oclk_dly);
+       clk_set_phase(host->clk_sample, sclk_dly);
+       clk_set_phase(host->clk_output, oclk_dly);
 
        return sunxi_mmc_oclk_onoff(host, 1);
 }
@@ -913,6 +918,18 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
                return PTR_ERR(host->clk_mmc);
        }
 
+       host->clk_output = devm_clk_get(&pdev->dev, "output");
+       if (IS_ERR(host->clk_output)) {
+               dev_err(&pdev->dev, "Could not get output clock\n");
+               return PTR_ERR(host->clk_output);
+       }
+
+       host->clk_sample = devm_clk_get(&pdev->dev, "sample");
+       if (IS_ERR(host->clk_sample)) {
+               dev_err(&pdev->dev, "Could not get sample clock\n");
+               return PTR_ERR(host->clk_sample);
+       }
+
        host->reset = devm_reset_control_get(&pdev->dev, "ahb");
 
        ret = clk_prepare_enable(host->clk_ahb);
@@ -927,11 +944,23 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
                goto error_disable_clk_ahb;
        }
 
+       ret = clk_prepare_enable(host->clk_output);
+       if (ret) {
+               dev_err(&pdev->dev, "Enable output clk err %d\n", ret);
+               goto error_disable_clk_mmc;
+       }
+
+       ret = clk_prepare_enable(host->clk_sample);
+       if (ret) {
+               dev_err(&pdev->dev, "Enable sample clk err %d\n", ret);
+               goto error_disable_clk_output;
+       }
+
        if (!IS_ERR(host->reset)) {
                ret = reset_control_deassert(host->reset);
                if (ret) {
                        dev_err(&pdev->dev, "reset err %d\n", ret);
-                       goto error_disable_clk_mmc;
+                       goto error_disable_clk_sample;
                }
        }
 
@@ -950,6 +979,10 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host,
 error_assert_reset:
        if (!IS_ERR(host->reset))
                reset_control_assert(host->reset);
+error_disable_clk_sample:
+       clk_disable_unprepare(host->clk_sample);
+error_disable_clk_output:
+       clk_disable_unprepare(host->clk_output);
 error_disable_clk_mmc:
        clk_disable_unprepare(host->clk_mmc);
 error_disable_clk_ahb:
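The sunxi-mmc changes above drop the private clk_sunxi_mmc_phase_control() helper: the sample and output clocks are taken by name with devm_clk_get() and their phases set through the generic clk_set_phase() API, with the delays now expressed in degrees. A hedged sketch of that pattern, using placeholder phase values rather than the ones in the patch:

	#include <linux/clk.h>
	#include <linux/err.h>
	#include <linux/platform_device.h>

	static int example_set_mmc_phases(struct platform_device *pdev)
	{
		struct clk *sample = devm_clk_get(&pdev->dev, "sample");
		struct clk *output = devm_clk_get(&pdev->dev, "output");
		int ret;

		if (IS_ERR(sample))
			return PTR_ERR(sample);
		if (IS_ERR(output))
			return PTR_ERR(output);

		/* With the generic API the delays are given in degrees. */
		ret = clk_set_phase(output, 90);
		if (ret)
			return ret;

		return clk_set_phase(sample, 120);
	}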
index 3bc992cd70b7de4449afec00cdab816d9910ec22..f6a71092e1359ea095aad0aeca093065080ceb5c 100644 (file)
@@ -50,7 +50,7 @@ config TI_DAVINCI_CPDMA
          will be called davinci_cpdma.  This is recommended.
 
 config TI_CPSW_PHY_SEL
-       boolean "TI CPSW Switch Phy sel Support"
+       bool "TI CPSW Switch Phy sel Support"
        depends on TI_CPSW
        ---help---
          This driver supports configuring of the phy mode connected to
@@ -77,7 +77,7 @@ config TI_CPSW
          will be called cpsw.
 
 config TI_CPTS
-       boolean "TI Common Platform Time Sync (CPTS) Support"
+       bool "TI Common Platform Time Sync (CPTS) Support"
        depends on TI_CPSW
        select PTP_1588_CLOCK
        ---help---
index 37eed4d84e9cb458b2ac5d3b7248b0835115c67a..3bd9678315ad651beccd96ca1b5d74bcd1e68c1e 100644 (file)
@@ -397,14 +397,14 @@ config USB_NET_CDC_SUBSET
          not generally have permanently assigned Ethernet addresses.
 
 config USB_ALI_M5632
-       boolean "ALi M5632 based 'USB 2.0 Data Link' cables"
+       bool "ALi M5632 based 'USB 2.0 Data Link' cables"
        depends on USB_NET_CDC_SUBSET
        help
          Choose this option if you're using a host-to-host cable
          based on this design, which supports USB 2.0 high speed.
 
 config USB_AN2720
-       boolean "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
+       bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
        depends on USB_NET_CDC_SUBSET
        help
          Choose this option if you're using a host-to-host cable
@@ -412,7 +412,7 @@ config USB_AN2720
          Cypress brand.
 
 config USB_BELKIN
-       boolean "eTEK based host-to-host cables (Advance, Belkin, ...)"
+       bool "eTEK based host-to-host cables (Advance, Belkin, ...)"
        depends on USB_NET_CDC_SUBSET
        default y
        help
@@ -421,7 +421,7 @@ config USB_BELKIN
          microcontroller, with LEDs that indicate traffic.
 
 config USB_ARMLINUX
-       boolean "Embedded ARM Linux links (iPaq, ...)"
+       bool "Embedded ARM Linux links (iPaq, ...)"
        depends on USB_NET_CDC_SUBSET
        default y
        help
@@ -438,14 +438,14 @@ config USB_ARMLINUX
          this simpler protocol by installing a different kernel.
 
 config USB_EPSON2888
-       boolean "Epson 2888 based firmware (DEVELOPMENT)"
+       bool "Epson 2888 based firmware (DEVELOPMENT)"
        depends on USB_NET_CDC_SUBSET
        help
          Choose this option to support the usb networking links used
          by some sample firmware from Epson.
 
 config USB_KC2190
-       boolean "KT Technology KC2190 based cables (InstaNet)"
+       bool "KT Technology KC2190 based cables (InstaNet)"
        depends on USB_NET_CDC_SUBSET
        help
          Choose this option if you're using a host-to-host cable
index 006b8bcb2e31dfc5c21d7ed602546003e4a9e54c..2b4ef256c6b9432675b2de9bae9b6cd95c6309d2 100644 (file)
@@ -243,14 +243,14 @@ config RT2X00_LIB
        select AVERAGE
 
 config RT2X00_LIB_FIRMWARE
-       boolean
+       bool
        select FW_LOADER
 
 config RT2X00_LIB_CRYPTO
-       boolean
+       bool
 
 config RT2X00_LIB_LEDS
-       boolean
+       bool
        default y if (RT2X00_LIB=y && LEDS_CLASS=y) || (RT2X00_LIB=m && LEDS_CLASS!=n)
 
 config RT2X00_LIB_DEBUGFS
index 389440228c1de104cc1dde0eafbc94697f6373d5..7d1437b01fdd8f586e705c3842ab4e798de8750f 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 config PCIEAER
-       boolean "Root Port Advanced Error Reporting support"
+       bool "Root Port Advanced Error Reporting support"
        depends on PCIEPORTBUS
        select RAS
        default y
index 638e797037da973f7756b416a47f37c3ae314025..97527614141bf4a406528503d404ad8efd64695f 100644 (file)
@@ -735,6 +735,31 @@ config INTEL_IPS
          functionality.  If in doubt, say Y here; it will only load on
          supported platforms.
 
+config INTEL_IMR
+       bool "Intel Isolated Memory Region support"
+       default n
+       depends on X86_INTEL_QUARK && IOSF_MBI
+       ---help---
+         This option provides a means to manipulate Isolated Memory Regions.
+         IMRs are a set of registers that define read and write access masks
+         to prohibit certain system agents from accessing memory with 1 KiB
+         granularity.
+
+         IMRs make it possible to control read/write access to an address
+         by hardware agents inside the SoC. Read and write masks can be
+         defined for:
+               - eSRAM flush
+               - Dirty CPU snoop (write only)
+               - RMU access
+               - PCI Virtual Channel 0/Virtual Channel 1
+               - SMM mode
+               - Non SMM mode
+
+         Quark contains a set of eight IMR registers and makes use of those
+         registers during its bootup process.
+
+         If you are running on a Galileo/Quark, say Y here.
+
 config IBM_RTL
        tristate "Device driver to enable PRTL support"
        depends on X86 && PCI
index f71700e0d13212cb3a93701d939c9fced2b0f6ca..46b27469387283eed295e664a6c728a639860fa5 100644 (file)
@@ -856,8 +856,8 @@ static void asus_backlight_exit(struct asus_laptop *asus)
  * than count bytes. We set eof to 1 if we handle those 2 values. We return the
  * number of bytes written in page
  */
-static ssize_t show_infos(struct device *dev,
-                         struct device_attribute *attr, char *page)
+static ssize_t infos_show(struct device *dev, struct device_attribute *attr,
+                         char *page)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
        int len = 0;
@@ -926,6 +926,7 @@ static ssize_t show_infos(struct device *dev,
 
        return len;
 }
+static DEVICE_ATTR_RO(infos);
 
 static int parse_arg(const char *buf, unsigned long count, int *val)
 {
@@ -957,15 +958,15 @@ static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
 /*
  * LEDD display
  */
-static ssize_t show_ledd(struct device *dev,
-                        struct device_attribute *attr, char *buf)
+static ssize_t ledd_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "0x%08x\n", asus->ledd_status);
 }
 
-static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
+static ssize_t ledd_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
@@ -981,6 +982,7 @@ static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
        }
        return rv;
 }
+static DEVICE_ATTR_RW(ledd);
 
 /*
  * Wireless
@@ -1014,21 +1016,22 @@ static int asus_wlan_set(struct asus_laptop *asus, int status)
        return 0;
 }
 
-static ssize_t show_wlan(struct device *dev,
-                        struct device_attribute *attr, char *buf)
+static ssize_t wlan_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS));
 }
 
-static ssize_t store_wlan(struct device *dev, struct device_attribute *attr,
+static ssize_t wlan_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sysfs_acpi_set(asus, buf, count, METHOD_WLAN);
 }
+static DEVICE_ATTR_RW(wlan);
 
 /*
  * Bluetooth
@@ -1042,15 +1045,15 @@ static int asus_bluetooth_set(struct asus_laptop *asus, int status)
        return 0;
 }
 
-static ssize_t show_bluetooth(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static ssize_t bluetooth_show(struct device *dev, struct device_attribute *attr,
+                             char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS));
 }
 
-static ssize_t store_bluetooth(struct device *dev,
+static ssize_t bluetooth_store(struct device *dev,
                               struct device_attribute *attr, const char *buf,
                               size_t count)
 {
@@ -1058,6 +1061,7 @@ static ssize_t store_bluetooth(struct device *dev,
 
        return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH);
 }
+static DEVICE_ATTR_RW(bluetooth);
 
 /*
  * Wimax
@@ -1071,22 +1075,22 @@ static int asus_wimax_set(struct asus_laptop *asus, int status)
        return 0;
 }
 
-static ssize_t show_wimax(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static ssize_t wimax_show(struct device *dev, struct device_attribute *attr,
+                         char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", asus_wireless_status(asus, WM_RSTS));
 }
 
-static ssize_t store_wimax(struct device *dev,
-                              struct device_attribute *attr, const char *buf,
-                              size_t count)
+static ssize_t wimax_store(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sysfs_acpi_set(asus, buf, count, METHOD_WIMAX);
 }
+static DEVICE_ATTR_RW(wimax);
 
 /*
  * Wwan
@@ -1100,22 +1104,22 @@ static int asus_wwan_set(struct asus_laptop *asus, int status)
        return 0;
 }
 
-static ssize_t show_wwan(struct device *dev,
-                             struct device_attribute *attr, char *buf)
+static ssize_t wwan_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", asus_wireless_status(asus, WW_RSTS));
 }
 
-static ssize_t store_wwan(struct device *dev,
-                              struct device_attribute *attr, const char *buf,
-                              size_t count)
+static ssize_t wwan_store(struct device *dev, struct device_attribute *attr,
+                         const char *buf, size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sysfs_acpi_set(asus, buf, count, METHOD_WWAN);
 }
+static DEVICE_ATTR_RW(wwan);
 
 /*
  * Display
@@ -1135,8 +1139,8 @@ static void asus_set_display(struct asus_laptop *asus, int value)
  * displays hooked up simultaneously, so be warned. See the acpi4asus README
  * for more info.
  */
-static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
-                         const char *buf, size_t count)
+static ssize_t display_store(struct device *dev, struct device_attribute *attr,
+                            const char *buf, size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
        int rv, value;
@@ -1146,6 +1150,7 @@ static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
                asus_set_display(asus, value);
        return rv;
 }
+static DEVICE_ATTR_WO(display);
 
 /*
  * Light Sens
@@ -1167,16 +1172,17 @@ static void asus_als_switch(struct asus_laptop *asus, int value)
        asus->light_switch = value;
 }
 
-static ssize_t show_lssw(struct device *dev,
-                        struct device_attribute *attr, char *buf)
+static ssize_t ls_switch_show(struct device *dev, struct device_attribute *attr,
+                             char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", asus->light_switch);
 }
 
-static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
-                         const char *buf, size_t count)
+static ssize_t ls_switch_store(struct device *dev,
+                              struct device_attribute *attr, const char *buf,
+                              size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
        int rv, value;
@@ -1187,6 +1193,7 @@ static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
 
        return rv;
 }
+static DEVICE_ATTR_RW(ls_switch);
 
 static void asus_als_level(struct asus_laptop *asus, int value)
 {
@@ -1195,16 +1202,16 @@ static void asus_als_level(struct asus_laptop *asus, int value)
        asus->light_level = value;
 }
 
-static ssize_t show_lslvl(struct device *dev,
-                         struct device_attribute *attr, char *buf)
+static ssize_t ls_level_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", asus->light_level);
 }
 
-static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
-                          const char *buf, size_t count)
+static ssize_t ls_level_store(struct device *dev, struct device_attribute *attr,
+                             const char *buf, size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
        int rv, value;
@@ -1218,6 +1225,7 @@ static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
 
        return rv;
 }
+static DEVICE_ATTR_RW(ls_level);
 
 static int pega_int_read(struct asus_laptop *asus, int arg, int *result)
 {
@@ -1234,8 +1242,8 @@ static int pega_int_read(struct asus_laptop *asus, int arg, int *result)
        return err;
 }
 
-static ssize_t show_lsvalue(struct device *dev,
-                           struct device_attribute *attr, char *buf)
+static ssize_t ls_value_show(struct device *dev, struct device_attribute *attr,
+                            char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
        int err, hi, lo;
@@ -1247,6 +1255,7 @@ static ssize_t show_lsvalue(struct device *dev,
                return sprintf(buf, "%d\n", 10 * hi + lo);
        return err;
 }
+static DEVICE_ATTR_RO(ls_value);
 
 /*
  * GPS
@@ -1274,15 +1283,15 @@ static int asus_gps_switch(struct asus_laptop *asus, int status)
        return 0;
 }
 
-static ssize_t show_gps(struct device *dev,
-                       struct device_attribute *attr, char *buf)
+static ssize_t gps_show(struct device *dev, struct device_attribute *attr,
+                       char *buf)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", asus_gps_status(asus));
 }
 
-static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
+static ssize_t gps_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
 {
        struct asus_laptop *asus = dev_get_drvdata(dev);
@@ -1298,6 +1307,7 @@ static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
        rfkill_set_sw_state(asus->gps.rfkill, !value);
        return rv;
 }
+static DEVICE_ATTR_RW(gps);
 
 /*
  * rfkill
@@ -1569,19 +1579,6 @@ static void asus_acpi_notify(struct acpi_device *device, u32 event)
        asus_input_notify(asus, event);
 }
 
-static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL);
-static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan);
-static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR,
-                  show_bluetooth, store_bluetooth);
-static DEVICE_ATTR(wimax, S_IRUGO | S_IWUSR, show_wimax, store_wimax);
-static DEVICE_ATTR(wwan, S_IRUGO | S_IWUSR, show_wwan, store_wwan);
-static DEVICE_ATTR(display, S_IWUSR, NULL, store_disp);
-static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd);
-static DEVICE_ATTR(ls_value, S_IRUGO, show_lsvalue, NULL);
-static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl);
-static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw);
-static DEVICE_ATTR(gps, S_IRUGO | S_IWUSR, show_gps, store_gps);
-
 static struct attribute *asus_attributes[] = {
        &dev_attr_infos.attr,
        &dev_attr_wlan.attr,
@@ -1616,7 +1613,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
                else
                        goto normal;
 
-               return supported;
+               return supported ? attr->mode : 0;
        }
 
 normal:
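The asus-laptop rework above renames the show_foo()/store_foo() callbacks to foo_show()/foo_store() so the DEVICE_ATTR_RO/RW/WO helper macros can declare the attributes, and makes is_visible return the attribute's mode bits rather than a bare boolean. A minimal sketch of the naming convention those macros rely on; the attribute here is made up:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	/* DEVICE_ATTR_RW(example) pastes "_show"/"_store" onto the name. */
	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 0);
	}

	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		return count;
	}
	static DEVICE_ATTR_RW(example);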
index 70d355a9ae2cc2f7cdc2efb04941a2c76688139f..55cf10bc78174b2b50094ec3aebf59ded10847b1 100644 (file)
@@ -520,7 +520,7 @@ static acpi_status cmpc_get_accel(acpi_handle handle,
 {
        union acpi_object param[2];
        struct acpi_object_list input;
-       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, 0 };
+       struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
        unsigned char *locs;
        acpi_status status;
 
index 7c21c1c44dfa9acc05bcdf235f12fff4fd8969b0..2a9afa261c615bffb1f1586f2fcb3a287fd33d4b 100644 (file)
@@ -64,6 +64,7 @@
 #include <linux/acpi.h>
 #include <linux/dmi.h>
 #include <linux/backlight.h>
+#include <linux/fb.h>
 #include <linux/input.h>
 #include <linux/kfifo.h>
 #include <linux/platform_device.h>
@@ -398,7 +399,7 @@ static int bl_get_brightness(struct backlight_device *b)
 static int bl_update_status(struct backlight_device *b)
 {
        int ret;
-       if (b->props.power == 4)
+       if (b->props.power == FB_BLANK_POWERDOWN)
                ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x3);
        else
                ret = call_fext_func(FUNC_BACKLIGHT, 0x1, 0x4, 0x0);
@@ -1139,9 +1140,9 @@ static int __init fujitsu_init(void)
 
        if (!acpi_video_backlight_support()) {
                if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
-                       fujitsu->bl_device->props.power = 4;
+                       fujitsu->bl_device->props.power = FB_BLANK_POWERDOWN;
                else
-                       fujitsu->bl_device->props.power = 0;
+                       fujitsu->bl_device->props.power = FB_BLANK_UNBLANK;
        }
 
        pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");
index 66a4d3284aab7cca2cef0ecb85b792d39013bfbb..001b199a8c33d3a90e6693edd6475a63bf6dc6a5 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * intel_scu_ipc.c: Driver for the Intel SCU IPC mechanism
  *
- * (C) Copyright 2008-2010 Intel Corporation
+ * (C) Copyright 2008-2010,2015 Intel Corporation
  * Author: Sreedhara DS (sreedhara.ds@intel.com)
  *
  * This program is free software; you can redistribute it and/or
 /*
  * IPC register summary
  *
- * IPC register blocks are memory mapped at fixed address of 0xFF11C000
+ * IPC register blocks are memory mapped at the address in PCI BAR 0.
  * To read or write information to the SCU, driver writes to IPC-1 memory
- * mapped registers (base address 0xFF11C000). The following is the IPC
- * mechanism
+ * mapped registers. The following is the IPC mechanism
  *
  * 1. IA core cDMI interface claims this transaction and converts it to a
  *    Transaction Layer Packet (TLP) message which is sent across the cDMI.
 #define PCI_DEVICE_ID_CLOVERVIEW       0x08ea
 #define PCI_DEVICE_ID_TANGIER          0x11a0
 
-/* intel scu ipc driver data*/
+/* intel scu ipc driver data */
 struct intel_scu_ipc_pdata_t {
-       u32 ipc_base;
        u32 i2c_base;
-       u32 ipc_len;
        u32 i2c_len;
        u8 irq_mode;
 };
 
 static struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = {
-       .ipc_base = 0xff11c000,
        .i2c_base = 0xff12b000,
-       .ipc_len = 0x100,
        .i2c_len = 0x10,
        .irq_mode = 0,
 };
 
 /* Penwell and Cloverview */
 static struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = {
-       .ipc_base = 0xff11c000,
        .i2c_base = 0xff12b000,
-       .ipc_len = 0x100,
        .i2c_len = 0x10,
        .irq_mode = 1,
 };
 
 static struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = {
-       .ipc_base = 0xff009000,
        .i2c_base  = 0xff00d000,
-       .ipc_len  = 0x100,
        .i2c_len = 0x10,
        .irq_mode = 0,
 };
@@ -114,8 +105,6 @@ struct intel_scu_ipc_dev {
 
 static struct intel_scu_ipc_dev  ipcdev; /* Only one for now */
 
-static int platform;           /* Platform type */
-
 /*
  * IPC Read Buffer (Read Only):
  * 16 byte buffer for receiving data from SCU, if IPC command
@@ -160,7 +149,6 @@ static inline void ipc_data_writel(u32 data, u32 offset) /* Write ipc data */
  * Format:
  * |rfu3(8)|error code(8)|initiator id(8)|cmd id(4)|rfu1(2)|error(1)|busy(1)|
  */
-
 static inline u8 ipc_read_status(void)
 {
        return __raw_readl(ipcdev.ipc_base + 0x04);
@@ -176,23 +164,24 @@ static inline u32 ipc_data_readl(u32 offset) /* Read ipc u32 data */
        return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset);
 }
 
-static inline int busy_loop(void) /* Wait till scu status is busy */
+/* Wait till SCU status is no longer busy */
+static inline int busy_loop(void)
 {
-       u32 status = 0;
-       u32 loop_count = 0;
+       u32 status = ipc_read_status();
+       u32 loop_count = 100000;
 
-       status = ipc_read_status();
-       while (status & 1) {
+       /* break if the SCU doesn't clear the busy bit after many retries */
+       while ((status & BIT(0)) && --loop_count) {
                udelay(1); /* SCU processing time is a few microseconds */
                status = ipc_read_status();
-               loop_count++;
-               /* break if scu doesn't reset busy bit after huge retry */
-               if (loop_count > 100000) {
-                       dev_err(&ipcdev.pdev->dev, "IPC timed out");
-                       return -ETIMEDOUT;
-               }
        }
-       if ((status >> 1) & 1)
+
+       if (status & BIT(0)) {
+               dev_err(&ipcdev.pdev->dev, "IPC timed out");
+               return -ETIMEDOUT;
+       }
+
+       if (status & BIT(1))
                return -EIO;
 
        return 0;
@@ -210,14 +199,13 @@ static inline int ipc_wait_for_interrupt(void)
        }
 
        status = ipc_read_status();
-
-       if ((status >> 1) & 1)
+       if (status & BIT(1))
                return -EIO;
 
        return 0;
 }
 
-int intel_scu_ipc_check_status(void)
+static int intel_scu_ipc_check_status(void)
 {
        return ipcdev.irq_mode ? ipc_wait_for_interrupt() : busy_loop();
 }
@@ -248,18 +236,18 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id)
        if (id == IPC_CMD_PCNTRL_R) {
                for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
                        ipc_data_writel(wbuf[nc], offset);
-               ipc_command((count*2) << 16 |  id << 12 | 0 << 8 | op);
+               ipc_command((count * 2) << 16 | id << 12 | 0 << 8 | op);
        } else if (id == IPC_CMD_PCNTRL_W) {
                for (nc = 0; nc < count; nc++, offset += 1)
                        cbuf[offset] = data[nc];
                for (nc = 0, offset = 0; nc < count; nc++, offset += 4)
                        ipc_data_writel(wbuf[nc], offset);
-               ipc_command((count*3) << 16 |  id << 12 | 0 << 8 | op);
+               ipc_command((count * 3) << 16 | id << 12 | 0 << 8 | op);
        } else if (id == IPC_CMD_PCNTRL_M) {
                cbuf[offset] = data[0];
                cbuf[offset + 1] = data[1];
                ipc_data_writel(wbuf[0], 0); /* Write wbuff */
-               ipc_command(4 << 16 |  id << 12 | 0 << 8 | op);
+               ipc_command(4 << 16 | id << 12 | 0 << 8 | op);
        }
 
        err = intel_scu_ipc_check_status();
@@ -301,7 +289,7 @@ EXPORT_SYMBOL(intel_scu_ipc_ioread8);
  */
 int intel_scu_ipc_ioread16(u16 addr, u16 *data)
 {
-       u16 x[2] = {addr, addr + 1 };
+       u16 x[2] = {addr, addr + 1};
        return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R);
 }
 EXPORT_SYMBOL(intel_scu_ipc_ioread16);
@@ -351,7 +339,7 @@ EXPORT_SYMBOL(intel_scu_ipc_iowrite8);
  */
 int intel_scu_ipc_iowrite16(u16 addr, u16 data)
 {
-       u16 x[2] = {addr, addr + 1 };
+       u16 x[2] = {addr, addr + 1};
        return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W);
 }
 EXPORT_SYMBOL(intel_scu_ipc_iowrite16);
@@ -412,7 +400,6 @@ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len)
 }
 EXPORT_SYMBOL(intel_scu_ipc_writev);
 
-
 /**
  *     intel_scu_ipc_update_register   -       r/m/w a register
  *     @addr: register address
@@ -475,9 +462,8 @@ EXPORT_SYMBOL(intel_scu_ipc_simple_command);
  *     Issue a command to the SCU which involves data transfers. Do the
  *     data copies under the lock but leave it for the caller to interpret
  */
-
 int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
-                                                       u32 *out, int outlen)
+                         u32 *out, int outlen)
 {
        int i, err;
 
@@ -503,7 +489,7 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen,
 }
 EXPORT_SYMBOL(intel_scu_ipc_command);
 
-/*I2C commands */
+/* I2C commands */
 #define IPC_I2C_WRITE 1 /* I2C Write command */
 #define IPC_I2C_READ  2 /* I2C Read command */
 
@@ -577,7 +563,7 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
 {
        int err;
        struct intel_scu_ipc_pdata_t *pdata;
-       resource_size_t pci_resource;
+       resource_size_t base;
 
        if (ipcdev.pdev)                /* We support only one SCU */
                return -EBUSY;
@@ -595,8 +581,8 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (err)
                return err;
 
-       pci_resource = pci_resource_start(dev, 0);
-       if (!pci_resource)
+       base = pci_resource_start(dev, 0);
+       if (!base)
                return -ENOMEM;
 
        init_completion(&ipcdev.cmd_complete);
@@ -604,7 +590,7 @@ static int ipc_probe(struct pci_dev *dev, const struct pci_device_id *id)
        if (request_irq(dev->irq, ioc, 0, "intel_scu_ipc", &ipcdev))
                return -EBUSY;
 
-       ipcdev.ipc_base = ioremap_nocache(pdata->ipc_base, pdata->ipc_len);
+       ipcdev.ipc_base = ioremap_nocache(base, pci_resource_len(dev, 0));
        if (!ipcdev.ipc_base)
                return -ENOMEM;
 
@@ -666,9 +652,10 @@ static struct pci_driver ipc_driver = {
        .remove = ipc_remove,
 };
 
-
 static int __init intel_scu_ipc_init(void)
 {
+       int platform;           /* Platform type */
+
        platform = intel_mid_identify_cpu();
        if (platform == 0)
                return -ENODEV;
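With the ipc_base/ipc_len platform data removed, the intel_scu_ipc probe above maps whatever region PCI BAR 0 describes instead of the hardcoded 0xFF11C000. A hedged sketch of that BAR-based mapping, assuming the device has already been enabled and with error handling abbreviated:

	#include <linux/pci.h>
	#include <linux/io.h>

	static void __iomem *example_map_bar0(struct pci_dev *pdev)
	{
		resource_size_t base = pci_resource_start(pdev, 0);

		if (!base)
			return NULL;

		return ioremap_nocache(base, pci_resource_len(pdev, 0));
	}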
index ff765d8e1a09f648ad40770cdd59cf6aadaaf7e8..9e701b2256f9571afca37e01a7185fe8e63e8399 100644 (file)
@@ -124,6 +124,10 @@ struct sabi_commands {
        u16 get_wireless_status;
        u16 set_wireless_status;
 
+       /* 0x80 is off, 0x81 is on */
+       u16 get_lid_handling;
+       u16 set_lid_handling;
+
        /* 0x81 to read, (0x82 | level << 8) to set, 0xaabb to enable */
        u16 kbd_backlight;
 
@@ -194,6 +198,9 @@ static const struct sabi_config sabi_configs[] = {
                        .get_wireless_status = 0xFFFF,
                        .set_wireless_status = 0xFFFF,
 
+                       .get_lid_handling = 0xFFFF,
+                       .set_lid_handling = 0xFFFF,
+
                        .kbd_backlight = 0xFFFF,
 
                        .set_linux = 0x0a,
@@ -254,6 +261,9 @@ static const struct sabi_config sabi_configs[] = {
                        .get_wireless_status = 0x69,
                        .set_wireless_status = 0x6a,
 
+                       .get_lid_handling = 0x6d,
+                       .set_lid_handling = 0x6e,
+
                        .kbd_backlight = 0x78,
 
                        .set_linux = 0xff,
@@ -353,6 +363,8 @@ struct samsung_quirks {
        bool broken_acpi_video;
        bool four_kbd_backlight_levels;
        bool enable_kbd_backlight;
+       bool use_native_backlight;
+       bool lid_handling;
 };
 
 static struct samsung_quirks samsung_unknown = {};
@@ -361,11 +373,19 @@ static struct samsung_quirks samsung_broken_acpi_video = {
        .broken_acpi_video = true,
 };
 
+static struct samsung_quirks samsung_use_native_backlight = {
+       .use_native_backlight = true,
+};
+
 static struct samsung_quirks samsung_np740u3e = {
        .four_kbd_backlight_levels = true,
        .enable_kbd_backlight = true,
 };
 
+static struct samsung_quirks samsung_lid_handling = {
+       .lid_handling = true,
+};
+
 static bool force;
 module_param(force, bool, 0);
 MODULE_PARM_DESC(force,
@@ -748,7 +768,7 @@ static ssize_t set_battery_life_extender(struct device *dev,
        struct samsung_laptop *samsung = dev_get_drvdata(dev);
        int ret, value;
 
-       if (!count || sscanf(buf, "%i", &value) != 1)
+       if (!count || kstrtoint(buf, 0, &value) != 0)
                return -EINVAL;
 
        ret = write_battery_life_extender(samsung, !!value);
@@ -817,7 +837,7 @@ static ssize_t set_usb_charge(struct device *dev,
        struct samsung_laptop *samsung = dev_get_drvdata(dev);
        int ret, value;
 
-       if (!count || sscanf(buf, "%i", &value) != 1)
+       if (!count || kstrtoint(buf, 0, &value) != 0)
                return -EINVAL;
 
        ret = write_usb_charge(samsung, !!value);
@@ -830,10 +850,76 @@ static ssize_t set_usb_charge(struct device *dev,
 static DEVICE_ATTR(usb_charge, S_IWUSR | S_IRUGO,
                   get_usb_charge, set_usb_charge);
 
+static int read_lid_handling(struct samsung_laptop *samsung)
+{
+       const struct sabi_commands *commands = &samsung->config->commands;
+       struct sabi_data data;
+       int retval;
+
+       if (commands->get_lid_handling == 0xFFFF)
+               return -ENODEV;
+
+       memset(&data, 0, sizeof(data));
+       retval = sabi_command(samsung, commands->get_lid_handling,
+                             &data, &data);
+
+       if (retval)
+               return retval;
+
+       return data.data[0] & 0x1;
+}
+
+static int write_lid_handling(struct samsung_laptop *samsung,
+                             int enabled)
+{
+       const struct sabi_commands *commands = &samsung->config->commands;
+       struct sabi_data data;
+
+       memset(&data, 0, sizeof(data));
+       data.data[0] = 0x80 | enabled;
+       return sabi_command(samsung, commands->set_lid_handling,
+                           &data, NULL);
+}
+
+static ssize_t get_lid_handling(struct device *dev,
+                               struct device_attribute *attr,
+                               char *buf)
+{
+       struct samsung_laptop *samsung = dev_get_drvdata(dev);
+       int ret;
+
+       ret = read_lid_handling(samsung);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%d\n", ret);
+}
+
+static ssize_t set_lid_handling(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t count)
+{
+       struct samsung_laptop *samsung = dev_get_drvdata(dev);
+       int ret, value;
+
+       if (!count || kstrtoint(buf, 0, &value) != 0)
+               return -EINVAL;
+
+       ret = write_lid_handling(samsung, !!value);
+       if (ret < 0)
+               return ret;
+
+       return count;
+}
+
+static DEVICE_ATTR(lid_handling, S_IWUSR | S_IRUGO,
+                  get_lid_handling, set_lid_handling);
+
 static struct attribute *platform_attributes[] = {
        &dev_attr_performance_level.attr,
        &dev_attr_battery_life_extender.attr,
        &dev_attr_usb_charge.attr,
+       &dev_attr_lid_handling.attr,
        NULL
 };
 
@@ -956,6 +1042,22 @@ static int __init samsung_rfkill_init(struct samsung_laptop *samsung)
        return 0;
 }
 
+static void samsung_lid_handling_exit(struct samsung_laptop *samsung)
+{
+       if (samsung->quirks->lid_handling)
+               write_lid_handling(samsung, 0);
+}
+
+static int __init samsung_lid_handling_init(struct samsung_laptop *samsung)
+{
+       int retval = 0;
+
+       if (samsung->quirks->lid_handling)
+               retval = write_lid_handling(samsung, 1);
+
+       return retval;
+}
+
 static int kbd_backlight_enable(struct samsung_laptop *samsung)
 {
        const struct sabi_commands *commands = &samsung->config->commands;
@@ -1111,7 +1213,7 @@ static int __init samsung_backlight_init(struct samsung_laptop *samsung)
 }
 
 static umode_t samsung_sysfs_is_visible(struct kobject *kobj,
-                                      struct attribute *attr, int idx)
+                                       struct attribute *attr, int idx)
 {
        struct device *dev = container_of(kobj, struct device, kobj);
        struct platform_device *pdev = to_platform_device(dev);
@@ -1124,6 +1226,8 @@ static umode_t samsung_sysfs_is_visible(struct kobject *kobj,
                ok = !!(read_battery_life_extender(samsung) >= 0);
        if (attr == &dev_attr_usb_charge.attr)
                ok = !!(read_usb_charge(samsung) >= 0);
+       if (attr == &dev_attr_lid_handling.attr)
+               ok = !!(read_lid_handling(samsung) >= 0);
 
        return ok ? attr->mode : 0;
 }
@@ -1357,7 +1461,7 @@ static int __init samsung_sabi_init(struct samsung_laptop *samsung)
        samsung_sabi_diag(samsung);
 
        /* Try to find one of the signatures in memory to find the header */
-       for (i = 0; sabi_configs[i].test_string != 0; ++i) {
+       for (i = 0; sabi_configs[i].test_string != NULL; ++i) {
                samsung->config = &sabi_configs[i];
                loca = find_signature(samsung->f0000_segment,
                                      samsung->config->test_string);
@@ -1436,6 +1540,9 @@ static int samsung_pm_notification(struct notifier_block *nb,
            samsung->quirks->enable_kbd_backlight)
                kbd_backlight_enable(samsung);
 
+       if (val == PM_POST_HIBERNATION && samsung->quirks->lid_handling)
+               write_lid_handling(samsung, 1);
+
        return 0;
 }
 
@@ -1507,7 +1614,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "N150P"),
                DMI_MATCH(DMI_BOARD_NAME, "N150P"),
                },
-        .driver_data = &samsung_broken_acpi_video,
+        .driver_data = &samsung_use_native_backlight,
        },
        {
         .callback = samsung_dmi_matched,
@@ -1517,7 +1624,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "N145P/N250P/N260P"),
                DMI_MATCH(DMI_BOARD_NAME, "N145P/N250P/N260P"),
                },
-        .driver_data = &samsung_broken_acpi_video,
+        .driver_data = &samsung_use_native_backlight,
        },
        {
         .callback = samsung_dmi_matched,
@@ -1557,7 +1664,7 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "N250P"),
                DMI_MATCH(DMI_BOARD_NAME, "N250P"),
                },
-        .driver_data = &samsung_broken_acpi_video,
+        .driver_data = &samsung_use_native_backlight,
        },
        {
         .callback = samsung_dmi_matched,
@@ -1578,6 +1685,15 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
                },
         .driver_data = &samsung_np740u3e,
        },
+       {
+        .callback = samsung_dmi_matched,
+        .ident = "300V3Z/300V4Z/300V5Z",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "300V3Z/300V4Z/300V5Z"),
+               },
+        .driver_data = &samsung_lid_handling,
+       },
        { },
 };
 MODULE_DEVICE_TABLE(dmi, samsung_dmi_table);
@@ -1616,6 +1732,15 @@ static int __init samsung_init(void)
                pr_info("Disabling ACPI video driver\n");
                acpi_video_unregister();
        }
+
+       if (samsung->quirks->use_native_backlight) {
+               pr_info("Using native backlight driver\n");
+               /* Tell acpi-video to not handle the backlight */
+               acpi_video_dmi_promote_vendor();
+               acpi_video_unregister();
+               /* And also do not handle it ourselves */
+               samsung->handle_backlight = false;
+       }
 #endif
 
        ret = samsung_platform_init(samsung);
@@ -1648,6 +1773,10 @@ static int __init samsung_init(void)
        if (ret)
                goto error_leds;
 
+       ret = samsung_lid_handling_init(samsung);
+       if (ret)
+               goto error_lid_handling;
+
        ret = samsung_debugfs_init(samsung);
        if (ret)
                goto error_debugfs;
@@ -1659,6 +1788,8 @@ static int __init samsung_init(void)
        return ret;
 
 error_debugfs:
+       samsung_lid_handling_exit(samsung);
+error_lid_handling:
        samsung_leds_exit(samsung);
 error_leds:
        samsung_rfkill_exit(samsung);
@@ -1683,6 +1814,7 @@ static void __exit samsung_exit(void)
        unregister_pm_notifier(&samsung->pm_nb);
 
        samsung_debugfs_exit(samsung);
+       samsung_lid_handling_exit(samsung);
        samsung_leds_exit(samsung);
        samsung_rfkill_exit(samsung);
        samsung_backlight_exit(samsung);
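Besides the new lid_handling attribute and quirk plumbing, the samsung-laptop changes above replace sscanf(buf, "%i", ...) with kstrtoint() in the sysfs store handlers, which rejects trailing garbage that sscanf would silently accept. A minimal sketch of that store pattern; the attribute name and hardware hook are illustrative:

	#include <linux/kernel.h>
	#include <linux/device.h>

	static ssize_t example_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
	{
		int value;

		if (!count || kstrtoint(buf, 0, &value) != 0)
			return -EINVAL;

		/* ... write !!value to the firmware here ... */

		return count;
	}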
index 6dd1c0e7dcd9af81a43db55f0c4ba87956e37dde..e51c1e7536077306234e579121cf18badcb45a01 100644 (file)
@@ -1032,7 +1032,7 @@ struct sony_backlight_props {
        u8                      offset;
        u8                      maxlvl;
 };
-struct sony_backlight_props sony_bl_props;
+static struct sony_backlight_props sony_bl_props;
 
 static int sony_backlight_update_status(struct backlight_device *bd)
 {
index c3d11fabc46f21c98c6122497958837ae291b171..3b8ceee7c5cbfd891c87a5fd58c59017db5f34e8 100644 (file)
@@ -196,6 +196,7 @@ enum tpacpi_hkey_event_t {
        /* Key-related user-interface events */
        TP_HKEY_EV_KEY_NUMLOCK          = 0x6000, /* NumLock key pressed */
        TP_HKEY_EV_KEY_FN               = 0x6005, /* Fn key pressed? E420 */
+       TP_HKEY_EV_KEY_FN_ESC           = 0x6060, /* Fn+Esc key pressed X240 */
 
        /* Thermal events */
        TP_HKEY_EV_ALARM_BAT_HOT        = 0x6011, /* battery too hot */
@@ -3456,7 +3457,7 @@ enum ADAPTIVE_KEY_MODE {
        LAYFLAT_MODE
 };
 
-const int adaptive_keyboard_modes[] = {
+static const int adaptive_keyboard_modes[] = {
        HOME_MODE,
 /*     WEB_BROWSER_MODE = 2,
        WEB_CONFERENCE_MODE = 3, */
@@ -3712,6 +3713,7 @@ static bool hotkey_notify_6xxx(const u32 hkey,
 
        case TP_HKEY_EV_KEY_NUMLOCK:
        case TP_HKEY_EV_KEY_FN:
+       case TP_HKEY_EV_KEY_FN_ESC:
                /* key press events, we just ignore them as long as the EC
                 * is still reporting them in the normal keyboard stream */
                *send_acpi_ev = false;
@@ -8883,17 +8885,31 @@ static bool __pure __init tpacpi_is_fw_digit(const char c)
        return (c >= '0' && c <= '9') || (c >= 'A' && c <= 'Z');
 }
 
-/* Most models: xxyTkkWW (#.##c); Ancient 570/600 and -SL lacks (#.##c) */
 static bool __pure __init tpacpi_is_valid_fw_id(const char * const s,
                                                const char t)
 {
-       return s && strlen(s) >= 8 &&
+       /*
+        * Most models: xxyTkkWW (#.##c)
+        * Ancient 570/600 and -SL lacks (#.##c)
+        */
+       if (s && strlen(s) >= 8 &&
                tpacpi_is_fw_digit(s[0]) &&
                tpacpi_is_fw_digit(s[1]) &&
                s[2] == t &&
                (s[3] == 'T' || s[3] == 'N') &&
                tpacpi_is_fw_digit(s[4]) &&
-               tpacpi_is_fw_digit(s[5]);
+               tpacpi_is_fw_digit(s[5]))
+               return true;
+
+       /* New models: xxxyTkkW (#.##c); T550 and some others */
+       return s && strlen(s) >= 8 &&
+               tpacpi_is_fw_digit(s[0]) &&
+               tpacpi_is_fw_digit(s[1]) &&
+               tpacpi_is_fw_digit(s[2]) &&
+               s[3] == t &&
+               (s[4] == 'T' || s[4] == 'N') &&
+               tpacpi_is_fw_digit(s[5]) &&
+               tpacpi_is_fw_digit(s[6]);
 }
 
 /* returns 0 - probe ok, or < 0 - probe error.
index fc34a71866ed067624c2a851c29a182e9be48f08..dbcb7a8915b84fb8794fd4daea15825e55a49cdd 100644 (file)
@@ -1,11 +1,10 @@
 /*
  *  toshiba_acpi.c - Toshiba Laptop ACPI Extras
  *
- *
  *  Copyright (C) 2002-2004 John Belmonte
  *  Copyright (C) 2008 Philip Langdale
  *  Copyright (C) 2010 Pierre Ducroquet
- *  Copyright (C) 2014 Azael Avalos
+ *  Copyright (C) 2014-2015 Azael Avalos
  *
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  *  GNU General Public License for more details.
  *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
  *
 *  The development page for this driver is located at
  *  http://memebeam.org/toys/ToshibaAcpiDriver.
  *             engineering the Windows drivers
  *     Yasushi Nagato - changes for linux kernel 2.4 -> 2.5
  *     Rob Miller - TV out and hotkeys help
- *
- *
- *  TODO
- *
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
-#define TOSHIBA_ACPI_VERSION   "0.20"
+#define TOSHIBA_ACPI_VERSION   "0.21"
 #define PROC_INTERFACE_VERSION 1
 
 #include <linux/kernel.h>
@@ -57,7 +50,7 @@
 #include <linux/i8042.h>
 #include <linux/acpi.h>
 #include <linux/dmi.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 MODULE_AUTHOR("John Belmonte");
 MODULE_DESCRIPTION("Toshiba Laptop ACPI Extras Driver");
@@ -71,7 +64,8 @@ MODULE_LICENSE("GPL");
 /* Toshiba ACPI method paths */
 #define METHOD_VIDEO_OUT       "\\_SB_.VALX.DSSX"
 
-/* The Toshiba configuration interface is composed of the HCI and the SCI,
+/*
+ * The Toshiba configuration interface is composed of the HCI and the SCI,
  * which are defined as follows:
  *
  * HCI is Toshiba's "Hardware Control Interface" which is supposed to
@@ -108,6 +102,7 @@ MODULE_LICENSE("GPL");
 #define TOS_FIFO_EMPTY                 0x8c00
 #define TOS_DATA_NOT_AVAILABLE         0x8d20
 #define TOS_NOT_INITIALIZED            0x8d50
+#define TOS_NOT_INSTALLED              0x8e00
 
 /* registers */
 #define HCI_FAN                                0x0004
@@ -121,9 +116,14 @@ MODULE_LICENSE("GPL");
 #define HCI_KBD_ILLUMINATION           0x0095
 #define HCI_ECO_MODE                   0x0097
 #define HCI_ACCELEROMETER2             0x00a6
+#define SCI_PANEL_POWER_ON             0x010d
 #define SCI_ILLUMINATION               0x014e
+#define SCI_USB_SLEEP_CHARGE           0x0150
 #define SCI_KBD_ILLUM_STATUS           0x015c
+#define SCI_USB_SLEEP_MUSIC            0x015e
+#define SCI_USB_THREE                  0x0169
 #define SCI_TOUCHPAD                   0x050e
+#define SCI_KBD_FUNCTION_KEYS          0x0522
 
 /* field definitions */
 #define HCI_ACCEL_MASK                 0x7fff
@@ -146,6 +146,15 @@ MODULE_LICENSE("GPL");
 #define SCI_KBD_MODE_ON                        0x8
 #define SCI_KBD_MODE_OFF               0x10
 #define SCI_KBD_TIME_MAX               0x3c001a
+#define SCI_USB_CHARGE_MODE_MASK       0xff
+#define SCI_USB_CHARGE_DISABLED                0x30000
+#define SCI_USB_CHARGE_ALTERNATE       0x30009
+#define SCI_USB_CHARGE_AUTO            0x30021
+#define SCI_USB_CHARGE_BAT_MASK                0x7
+#define SCI_USB_CHARGE_BAT_LVL_OFF     0x1
+#define SCI_USB_CHARGE_BAT_LVL_ON      0x4
+#define SCI_USB_CHARGE_BAT_LVL         0x0200
+#define SCI_USB_CHARGE_RAPID_DSP       0x0300
 
 struct toshiba_acpi_dev {
        struct acpi_device *acpi_dev;
@@ -164,6 +173,7 @@ struct toshiba_acpi_dev {
        int kbd_type;
        int kbd_mode;
        int kbd_time;
+       int usbsc_bat_level;
 
        unsigned int illumination_supported:1;
        unsigned int video_supported:1;
@@ -177,6 +187,12 @@ struct toshiba_acpi_dev {
        unsigned int touchpad_supported:1;
        unsigned int eco_supported:1;
        unsigned int accelerometer_supported:1;
+       unsigned int usb_sleep_charge_supported:1;
+       unsigned int usb_rapid_charge_supported:1;
+       unsigned int usb_sleep_music_supported:1;
+       unsigned int kbd_function_keys_supported:1;
+       unsigned int panel_power_on_supported:1;
+       unsigned int usb_three_supported:1;
        unsigned int sysfs_created:1;
 
        struct mutex mutex;
@@ -264,15 +280,17 @@ static const struct key_entry toshiba_acpi_alt_keymap[] = {
        { KE_END, 0 },
 };
 
-/* utility
+/*
+ * Utility
  */
 
-static __inline__ void _set_bit(u32 * word, u32 mask, int value)
+static inline void _set_bit(u32 *word, u32 mask, int value)
 {
        *word = (*word & ~mask) | (mask * value);
 }
 
-/* acpi interface wrappers
+/*
+ * ACPI interface wrappers
  */
 
 static int write_acpi_int(const char *methodName, int val)
@@ -283,7 +301,8 @@ static int write_acpi_int(const char *methodName, int val)
        return (status == AE_OK) ? 0 : -EIO;
 }
 
-/* Perform a raw configuration call.  Here we don't care about input or output
+/*
+ * Perform a raw configuration call.  Here we don't care about input or output
  * buffer format.
  */
 static acpi_status tci_raw(struct toshiba_acpi_dev *dev,
@@ -310,15 +329,15 @@ static acpi_status tci_raw(struct toshiba_acpi_dev *dev,
                                      (char *)dev->method_hci, &params,
                                      &results);
        if ((status == AE_OK) && (out_objs->package.count <= TCI_WORDS)) {
-               for (i = 0; i < out_objs->package.count; ++i) {
+               for (i = 0; i < out_objs->package.count; ++i)
                        out[i] = out_objs->package.elements[i].integer.value;
-               }
        }
 
        return status;
 }
 
-/* common hci tasks (get or set one or two value)
+/*
+ * Common hci tasks (get or set one or two value)
  *
  * In addition to the ACPI status, the HCI system returns a result which
  * may be useful (such as "not supported").
@@ -338,6 +357,7 @@ static u32 hci_read1(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1)
        u32 in[TCI_WORDS] = { HCI_GET, reg, 0, 0, 0, 0 };
        u32 out[TCI_WORDS];
        acpi_status status = tci_raw(dev, in, out);
+
        if (ACPI_FAILURE(status))
                return TOS_FAILURE;
 
@@ -355,11 +375,13 @@ static u32 hci_write2(struct toshiba_acpi_dev *dev, u32 reg, u32 in1, u32 in2)
        return ACPI_SUCCESS(status) ? out[0] : TOS_FAILURE;
 }
 
-static u32 hci_read2(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1, u32 *out2)
+static u32 hci_read2(struct toshiba_acpi_dev *dev,
+                    u32 reg, u32 *out1, u32 *out2)
 {
        u32 in[TCI_WORDS] = { HCI_GET, reg, *out1, *out2, 0, 0 };
        u32 out[TCI_WORDS];
        acpi_status status = tci_raw(dev, in, out);
+
        if (ACPI_FAILURE(status))
                return TOS_FAILURE;
 
@@ -369,7 +391,8 @@ static u32 hci_read2(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1, u32 *out2
        return out[0];
 }
 
-/* common sci tasks
+/*
+ * Common sci tasks
  */
 
 static int sci_open(struct toshiba_acpi_dev *dev)
@@ -389,6 +412,20 @@ static int sci_open(struct toshiba_acpi_dev *dev)
        } else if (out[0] == TOS_ALREADY_OPEN) {
                pr_info("Toshiba SCI already opened\n");
                return 1;
+       } else if (out[0] == TOS_NOT_SUPPORTED) {
+               /*
+                * Some BIOSes do not have the SCI open/close functions
+                * implemented and return 0x8000 (Not Supported), failing to
+                * register some supported features.
+                *
+                * Simply return 1 if we hit those affected laptops to make the
+                * supported features work.
+                *
+                * In the case that some laptops really do not support the SCI,
+                * all the SCI dependent functions check for TOS_NOT_SUPPORTED,
+                * and thus, not registering support for the queried feature.
+                */
+               return 1;
        } else if (out[0] == TOS_NOT_PRESENT) {
                pr_info("Toshiba SCI is not present\n");
        }
@@ -421,6 +458,7 @@ static u32 sci_read(struct toshiba_acpi_dev *dev, u32 reg, u32 *out1)
        u32 in[TCI_WORDS] = { SCI_GET, reg, 0, 0, 0, 0 };
        u32 out[TCI_WORDS];
        acpi_status status = tci_raw(dev, in, out);
+
        if (ACPI_FAILURE(status))
                return TOS_FAILURE;
 
@@ -529,10 +567,11 @@ static int toshiba_kbd_illum_available(struct toshiba_acpi_dev *dev)
                return 0;
        }
 
-       /* Check for keyboard backlight timeout max value,
+       /*
+        * Check for keyboard backlight timeout max value,
         * previous kbd backlight implementation set this to
         * 0x3c0003, and now the new implementation set this
-        * to 0x3c001a, use this to distinguish between them
+        * to 0x3c001a, use this to distinguish between them.
         */
        if (out[3] == SCI_KBD_TIME_MAX)
                dev->kbd_type = 2;
@@ -667,19 +706,37 @@ static int toshiba_touchpad_get(struct toshiba_acpi_dev *dev, u32 *state)
 static int toshiba_eco_mode_available(struct toshiba_acpi_dev *dev)
 {
        acpi_status status;
-       u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 1, 0, 0 };
+       u32 in[TCI_WORDS] = { HCI_GET, HCI_ECO_MODE, 0, 0, 0, 0 };
        u32 out[TCI_WORDS];
 
        status = tci_raw(dev, in, out);
-       if (ACPI_FAILURE(status) || out[0] == TOS_INPUT_DATA_ERROR) {
-               pr_info("ACPI call to get ECO led failed\n");
-               return 0;
+       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+               pr_err("ACPI call to get ECO led failed\n");
+       } else if (out[0] == TOS_NOT_INSTALLED) {
+               pr_info("ECO led not installed\n");
+       } else if (out[0] == TOS_INPUT_DATA_ERROR) {
+               /*
+                * If we receive 0x8300 (Input Data Error), it means that the
+                * LED device is present, but that we just screwed the input
+                * parameters.
+                *
+                * Let's query the status of the LED to see if we really have a
+                * success response, indicating the actual presence of the LED,
+                * bail out otherwise.
+                */
+               in[3] = 1;
+               status = tci_raw(dev, in, out);
+               if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE)
+                       pr_err("ACPI call to get ECO led failed\n");
+               else if (out[0] == TOS_SUCCESS)
+                       return 1;
        }
 
-       return 1;
+       return 0;
 }
 
-static enum led_brightness toshiba_eco_mode_get_status(struct led_classdev *cdev)
+static enum led_brightness
+toshiba_eco_mode_get_status(struct led_classdev *cdev)
 {
        struct toshiba_acpi_dev *dev = container_of(cdev,
                        struct toshiba_acpi_dev, eco_led);
@@ -721,7 +778,8 @@ static int toshiba_accelerometer_supported(struct toshiba_acpi_dev *dev)
        u32 out[TCI_WORDS];
        acpi_status status;
 
-       /* Check if the accelerometer call exists,
+       /*
+        * Check if the accelerometer call exists,
         * this call also serves as initialization
         */
        status = tci_raw(dev, in, out);
@@ -760,198 +818,533 @@ static int toshiba_accelerometer_get(struct toshiba_acpi_dev *dev,
        return 0;
 }
 
-/* Bluetooth rfkill handlers */
-
-static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present)
+/* Sleep (Charge and Music) utilities support */
+static int toshiba_usb_sleep_charge_get(struct toshiba_acpi_dev *dev,
+                                       u32 *mode)
 {
-       u32 hci_result;
-       u32 value, value2;
+       u32 result;
 
-       value = 0;
-       value2 = 0;
-       hci_result = hci_read2(dev, HCI_WIRELESS, &value, &value2);
-       if (hci_result == TOS_SUCCESS)
-               *present = (value & HCI_WIRELESS_BT_PRESENT) ? true : false;
+       if (!sci_open(dev))
+               return -EIO;
 
-       return hci_result;
+       result = sci_read(dev, SCI_USB_SLEEP_CHARGE, mode);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to get USB S&C mode failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("USB Sleep and Charge not supported\n");
+               return -ENODEV;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
+       }
+
+       return 0;
 }
 
-static u32 hci_get_radio_state(struct toshiba_acpi_dev *dev, bool *radio_state)
+static int toshiba_usb_sleep_charge_set(struct toshiba_acpi_dev *dev,
+                                       u32 mode)
 {
-       u32 hci_result;
-       u32 value, value2;
+       u32 result;
 
-       value = 0;
-       value2 = 0x0001;
-       hci_result = hci_read2(dev, HCI_WIRELESS, &value, &value2);
+       if (!sci_open(dev))
+               return -EIO;
 
-       *radio_state = value & HCI_WIRELESS_KILL_SWITCH;
-       return hci_result;
+       result = sci_write(dev, SCI_USB_SLEEP_CHARGE, mode);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to set USB S&C mode failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("USB Sleep and Charge not supported\n");
+               return -ENODEV;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
+       }
+
+       return 0;
 }
 
-static int bt_rfkill_set_block(void *data, bool blocked)
+static int toshiba_sleep_functions_status_get(struct toshiba_acpi_dev *dev,
+                                             u32 *mode)
 {
-       struct toshiba_acpi_dev *dev = data;
-       u32 result1, result2;
-       u32 value;
-       int err;
-       bool radio_state;
-
-       value = (blocked == false);
+       u32 in[TCI_WORDS] = { SCI_GET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
+       u32 out[TCI_WORDS];
+       acpi_status status;
 
-       mutex_lock(&dev->mutex);
-       if (hci_get_radio_state(dev, &radio_state) != TOS_SUCCESS) {
-               err = -EIO;
-               goto out;
-       }
+       if (!sci_open(dev))
+               return -EIO;
 
-       if (!radio_state) {
-               err = 0;
-               goto out;
+       in[5] = SCI_USB_CHARGE_BAT_LVL;
+       status = tci_raw(dev, in, out);
+       sci_close(dev);
+       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+               pr_err("ACPI call to get USB S&C battery level failed\n");
+               return -EIO;
+       } else if (out[0] == TOS_NOT_SUPPORTED) {
+               pr_info("USB Sleep and Charge not supported\n");
+               return -ENODEV;
+       } else if (out[0] == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
        }
 
-       result1 = hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER);
-       result2 = hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH);
+       *mode = out[2];
 
-       if (result1 != TOS_SUCCESS || result2 != TOS_SUCCESS)
-               err = -EIO;
-       else
-               err = 0;
- out:
-       mutex_unlock(&dev->mutex);
-       return err;
+       return 0;
 }
 
-static void bt_rfkill_poll(struct rfkill *rfkill, void *data)
+static int toshiba_sleep_functions_status_set(struct toshiba_acpi_dev *dev,
+                                             u32 mode)
 {
-       bool new_rfk_state;
-       bool value;
-       u32 hci_result;
-       struct toshiba_acpi_dev *dev = data;
+       u32 in[TCI_WORDS] = { SCI_SET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
+       u32 out[TCI_WORDS];
+       acpi_status status;
 
-       mutex_lock(&dev->mutex);
+       if (!sci_open(dev))
+               return -EIO;
 
-       hci_result = hci_get_radio_state(dev, &value);
-       if (hci_result != TOS_SUCCESS) {
-               /* Can't do anything useful */
-               mutex_unlock(&dev->mutex);
-               return;
+       in[2] = mode;
+       in[5] = SCI_USB_CHARGE_BAT_LVL;
+       status = tci_raw(dev, in, out);
+       sci_close(dev);
+       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+               pr_err("ACPI call to set USB S&C battery level failed\n");
+               return -EIO;
+       } else if (out[0] == TOS_NOT_SUPPORTED) {
+               pr_info("USB Sleep and Charge not supported\n");
+               return -ENODEV;
+       } else if (out[0] == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
        }
 
-       new_rfk_state = value;
-
-       mutex_unlock(&dev->mutex);
-
-       if (rfkill_set_hw_state(rfkill, !new_rfk_state))
-               bt_rfkill_set_block(data, true);
+       return 0;
 }
 
-static const struct rfkill_ops toshiba_rfk_ops = {
-       .set_block = bt_rfkill_set_block,
-       .poll = bt_rfkill_poll,
-};
-
-static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
+static int toshiba_usb_rapid_charge_get(struct toshiba_acpi_dev *dev,
+                                       u32 *state)
 {
-       u32 hci_result;
-       u32 status;
+       u32 in[TCI_WORDS] = { SCI_GET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
+       u32 out[TCI_WORDS];
+       acpi_status status;
 
-       hci_result = hci_read1(dev, HCI_TR_BACKLIGHT, &status);
-       *enabled = !status;
-       return hci_result == TOS_SUCCESS ? 0 : -EIO;
-}
+       if (!sci_open(dev))
+               return -EIO;
 
-static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
-{
-       u32 hci_result;
-       u32 value = !enable;
+       in[5] = SCI_USB_CHARGE_RAPID_DSP;
+       status = tci_raw(dev, in, out);
+       sci_close(dev);
+       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+               pr_err("ACPI call to get USB Rapid Charge failed\n");
+               return -EIO;
+       } else if (out[0] == TOS_NOT_SUPPORTED ||
+                  out[0] == TOS_INPUT_DATA_ERROR) {
+                  pr_info("USB Rapid Charge not supported\n");
+               return -ENODEV;
+       }
 
-       hci_result = hci_write1(dev, HCI_TR_BACKLIGHT, value);
-       return hci_result == TOS_SUCCESS ? 0 : -EIO;
-}
+       *state = out[2];
 
-static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
+       return 0;
+}
 
-static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
+static int toshiba_usb_rapid_charge_set(struct toshiba_acpi_dev *dev,
+                                       u32 state)
 {
-       u32 hci_result;
-       u32 value;
-       int brightness = 0;
+       u32 in[TCI_WORDS] = { SCI_SET, SCI_USB_SLEEP_CHARGE, 0, 0, 0, 0 };
+       u32 out[TCI_WORDS];
+       acpi_status status;
 
-       if (dev->tr_backlight_supported) {
-               bool enabled;
-               int ret = get_tr_backlight_status(dev, &enabled);
-               if (ret)
-                       return ret;
-               if (enabled)
-                       return 0;
-               brightness++;
-       }
+       if (!sci_open(dev))
+               return -EIO;
 
-       hci_result = hci_read1(dev, HCI_LCD_BRIGHTNESS, &value);
-       if (hci_result == TOS_SUCCESS)
-               return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
+       in[2] = state;
+       in[5] = SCI_USB_CHARGE_RAPID_DSP;
+       status = tci_raw(dev, in, out);
+       sci_close(dev);
+       if (ACPI_FAILURE(status) || out[0] == TOS_FAILURE) {
+               pr_err("ACPI call to set USB Rapid Charge failed\n");
+               return -EIO;
+       } else if (out[0] == TOS_NOT_SUPPORTED) {
+               pr_info("USB Rapid Charge not supported\n");
+               return -ENODEV;
+       } else if (out[0] == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
+       }
 
-       return -EIO;
+       return 0;
 }
 
-static int get_lcd_brightness(struct backlight_device *bd)
+static int toshiba_usb_sleep_music_get(struct toshiba_acpi_dev *dev, u32 *state)
 {
-       struct toshiba_acpi_dev *dev = bl_get_data(bd);
-       return __get_lcd_brightness(dev);
-}
+       u32 result;
 
-static int lcd_proc_show(struct seq_file *m, void *v)
-{
-       struct toshiba_acpi_dev *dev = m->private;
-       int value;
-       int levels;
+       if (!sci_open(dev))
+               return -EIO;
 
-       if (!dev->backlight_dev)
+       result = sci_read(dev, SCI_USB_SLEEP_MUSIC, state);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to get USB Sleep and Music failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("USB Sleep and Music not supported\n");
                return -ENODEV;
-
-       levels = dev->backlight_dev->props.max_brightness + 1;
-       value = get_lcd_brightness(dev->backlight_dev);
-       if (value >= 0) {
-               seq_printf(m, "brightness:              %d\n", value);
-               seq_printf(m, "brightness_levels:       %d\n", levels);
-               return 0;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
        }
 
-       pr_err("Error reading LCD brightness\n");
-       return -EIO;
+       return 0;
 }
 
-static int lcd_proc_open(struct inode *inode, struct file *file)
+static int toshiba_usb_sleep_music_set(struct toshiba_acpi_dev *dev, u32 state)
 {
-       return single_open(file, lcd_proc_show, PDE_DATA(inode));
-}
+       u32 result;
 
-static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
-{
-       u32 hci_result;
+       if (!sci_open(dev))
+               return -EIO;
 
-       if (dev->tr_backlight_supported) {
-               bool enable = !value;
-               int ret = set_tr_backlight_status(dev, enable);
-               if (ret)
-                       return ret;
-               if (value)
-                       value--;
+       result = sci_write(dev, SCI_USB_SLEEP_MUSIC, state);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to set USB Sleep and Music failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("USB Sleep and Music not supported\n");
+               return -ENODEV;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
        }
 
-       value = value << HCI_LCD_BRIGHTNESS_SHIFT;
-       hci_result = hci_write1(dev, HCI_LCD_BRIGHTNESS, value);
-       return hci_result == TOS_SUCCESS ? 0 : -EIO;
+       return 0;
 }
 
-static int set_lcd_status(struct backlight_device *bd)
+/* Keyboard function keys */
+static int toshiba_function_keys_get(struct toshiba_acpi_dev *dev, u32 *mode)
 {
-       struct toshiba_acpi_dev *dev = bl_get_data(bd);
-       return set_lcd_brightness(dev, bd->props.brightness);
-}
+       u32 result;
 
-static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
+       if (!sci_open(dev))
+               return -EIO;
+
+       result = sci_read(dev, SCI_KBD_FUNCTION_KEYS, mode);
+       sci_close(dev);
+       if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+               pr_err("ACPI call to get KBD function keys failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("KBD function keys not supported\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+static int toshiba_function_keys_set(struct toshiba_acpi_dev *dev, u32 mode)
+{
+       u32 result;
+
+       if (!sci_open(dev))
+               return -EIO;
+
+       result = sci_write(dev, SCI_KBD_FUNCTION_KEYS, mode);
+       sci_close(dev);
+       if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR) {
+               pr_err("ACPI call to set KBD function keys failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("KBD function keys not supported\n");
+               return -ENODEV;
+       }
+
+       return 0;
+}
+
+/* Panel Power ON */
+static int toshiba_panel_power_on_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+       u32 result;
+
+       if (!sci_open(dev))
+               return -EIO;
+
+       result = sci_read(dev, SCI_PANEL_POWER_ON, state);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to get Panel Power ON failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("Panel Power ON not supported\n");
+               return -ENODEV;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int toshiba_panel_power_on_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+       u32 result;
+
+       if (!sci_open(dev))
+               return -EIO;
+
+       result = sci_write(dev, SCI_PANEL_POWER_ON, state);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to set Panel Power ON failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("Panel Power ON not supported\n");
+               return -ENODEV;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/* USB Three */
+static int toshiba_usb_three_get(struct toshiba_acpi_dev *dev, u32 *state)
+{
+       u32 result;
+
+       if (!sci_open(dev))
+               return -EIO;
+
+       result = sci_read(dev, SCI_USB_THREE, state);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to get USB 3 failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("USB 3 not supported\n");
+               return -ENODEV;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
+       }
+
+       return 0;
+}
+
+static int toshiba_usb_three_set(struct toshiba_acpi_dev *dev, u32 state)
+{
+       u32 result;
+
+       if (!sci_open(dev))
+               return -EIO;
+
+       result = sci_write(dev, SCI_USB_THREE, state);
+       sci_close(dev);
+       if (result == TOS_FAILURE) {
+               pr_err("ACPI call to set USB 3 failed\n");
+               return -EIO;
+       } else if (result == TOS_NOT_SUPPORTED) {
+               pr_info("USB 3 not supported\n");
+               return -ENODEV;
+       } else if (result == TOS_INPUT_DATA_ERROR) {
+               return -EIO;
+       }
+
+       return 0;
+}
+
+/* Bluetooth rfkill handlers */
+
+static u32 hci_get_bt_present(struct toshiba_acpi_dev *dev, bool *present)
+{
+       u32 hci_result;
+       u32 value, value2;
+
+       value = 0;
+       value2 = 0;
+       hci_result = hci_read2(dev, HCI_WIRELESS, &value, &value2);
+       if (hci_result == TOS_SUCCESS)
+               *present = (value & HCI_WIRELESS_BT_PRESENT) ? true : false;
+
+       return hci_result;
+}
+
+static u32 hci_get_radio_state(struct toshiba_acpi_dev *dev, bool *radio_state)
+{
+       u32 hci_result;
+       u32 value, value2;
+
+       value = 0;
+       value2 = 0x0001;
+       hci_result = hci_read2(dev, HCI_WIRELESS, &value, &value2);
+
+       *radio_state = value & HCI_WIRELESS_KILL_SWITCH;
+       return hci_result;
+}
+
+static int bt_rfkill_set_block(void *data, bool blocked)
+{
+       struct toshiba_acpi_dev *dev = data;
+       u32 result1, result2;
+       u32 value;
+       int err;
+       bool radio_state;
+
+       value = (blocked == false);
+
+       mutex_lock(&dev->mutex);
+       if (hci_get_radio_state(dev, &radio_state) != TOS_SUCCESS) {
+               err = -EIO;
+               goto out;
+       }
+
+       if (!radio_state) {
+               err = 0;
+               goto out;
+       }
+
+       result1 = hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_POWER);
+       result2 = hci_write2(dev, HCI_WIRELESS, value, HCI_WIRELESS_BT_ATTACH);
+
+       if (result1 != TOS_SUCCESS || result2 != TOS_SUCCESS)
+               err = -EIO;
+       else
+               err = 0;
+ out:
+       mutex_unlock(&dev->mutex);
+       return err;
+}
+
+static void bt_rfkill_poll(struct rfkill *rfkill, void *data)
+{
+       bool new_rfk_state;
+       bool value;
+       u32 hci_result;
+       struct toshiba_acpi_dev *dev = data;
+
+       mutex_lock(&dev->mutex);
+
+       hci_result = hci_get_radio_state(dev, &value);
+       if (hci_result != TOS_SUCCESS) {
+               /* Can't do anything useful */
+               mutex_unlock(&dev->mutex);
+               return;
+       }
+
+       new_rfk_state = value;
+
+       mutex_unlock(&dev->mutex);
+
+       if (rfkill_set_hw_state(rfkill, !new_rfk_state))
+               bt_rfkill_set_block(data, true);
+}
+
+static const struct rfkill_ops toshiba_rfk_ops = {
+       .set_block = bt_rfkill_set_block,
+       .poll = bt_rfkill_poll,
+};
+
+static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
+{
+       u32 hci_result;
+       u32 status;
+
+       hci_result = hci_read1(dev, HCI_TR_BACKLIGHT, &status);
+       *enabled = !status;
+       return hci_result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
+{
+       u32 hci_result;
+       u32 value = !enable;
+
+       hci_result = hci_write1(dev, HCI_TR_BACKLIGHT, value);
+       return hci_result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static struct proc_dir_entry *toshiba_proc_dir /*= 0*/;
+
+static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
+{
+       u32 hci_result;
+       u32 value;
+       int brightness = 0;
+
+       if (dev->tr_backlight_supported) {
+               bool enabled;
+               int ret = get_tr_backlight_status(dev, &enabled);
+
+               if (ret)
+                       return ret;
+               if (enabled)
+                       return 0;
+               brightness++;
+       }
+
+       hci_result = hci_read1(dev, HCI_LCD_BRIGHTNESS, &value);
+       if (hci_result == TOS_SUCCESS)
+               return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
+
+       return -EIO;
+}
+
+static int get_lcd_brightness(struct backlight_device *bd)
+{
+       struct toshiba_acpi_dev *dev = bl_get_data(bd);
+
+       return __get_lcd_brightness(dev);
+}
+
+static int lcd_proc_show(struct seq_file *m, void *v)
+{
+       struct toshiba_acpi_dev *dev = m->private;
+       int value;
+       int levels;
+
+       if (!dev->backlight_dev)
+               return -ENODEV;
+
+       levels = dev->backlight_dev->props.max_brightness + 1;
+       value = get_lcd_brightness(dev->backlight_dev);
+       if (value >= 0) {
+               seq_printf(m, "brightness:              %d\n", value);
+               seq_printf(m, "brightness_levels:       %d\n", levels);
+               return 0;
+       }
+
+       pr_err("Error reading LCD brightness\n");
+       return -EIO;
+}
+
+static int lcd_proc_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, lcd_proc_show, PDE_DATA(inode));
+}
+
+static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
+{
+       u32 hci_result;
+
+       if (dev->tr_backlight_supported) {
+               bool enable = !value;
+               int ret = set_tr_backlight_status(dev, enable);
+
+               if (ret)
+                       return ret;
+               if (value)
+                       value--;
+       }
+
+       value = value << HCI_LCD_BRIGHTNESS_SHIFT;
+       hci_result = hci_write1(dev, HCI_LCD_BRIGHTNESS, value);
+       return hci_result == TOS_SUCCESS ? 0 : -EIO;
+}
+
+static int set_lcd_status(struct backlight_device *bd)
+{
+       struct toshiba_acpi_dev *dev = bl_get_data(bd);
+
+       return set_lcd_brightness(dev, bd->props.brightness);
+}
+
+static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
                              size_t count, loff_t *pos)
 {
        struct toshiba_acpi_dev *dev = PDE_DATA(file_inode(file));
@@ -1005,6 +1398,7 @@ static int video_proc_show(struct seq_file *m, void *v)
                int is_lcd = (value & HCI_VIDEO_OUT_LCD) ? 1 : 0;
                int is_crt = (value & HCI_VIDEO_OUT_CRT) ? 1 : 0;
                int is_tv = (value & HCI_VIDEO_OUT_TV) ? 1 : 0;
+
                seq_printf(m, "lcd_out:                 %d\n", is_lcd);
                seq_printf(m, "crt_out:                 %d\n", is_crt);
                seq_printf(m, "tv_out:                  %d\n", is_tv);
@@ -1042,9 +1436,9 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
 
        buffer = cmd;
 
-       /* scan expression.  Multiple expressions may be delimited with ;
-        *
-        *  NOTE: to keep scanning simple, invalid fields are ignored
+       /*
+        * Scan expression.  Multiple expressions may be delimited with ;
+        * NOTE: To keep scanning simple, invalid fields are ignored.
         */
        while (remain) {
                if (sscanf(buffer, " lcd_out : %i", &value) == 1)
@@ -1053,12 +1447,11 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
                        crt_out = value & 1;
                else if (sscanf(buffer, " tv_out : %i", &value) == 1)
                        tv_out = value & 1;
-               /* advance to one character past the next ; */
+               /* Advance to one character past the next ; */
                do {
                        ++buffer;
                        --remain;
-               }
-               while (remain && *(buffer - 1) != ';');
+               } while (remain && *(buffer - 1) != ';');
        }
 
        kfree(cmd);
@@ -1066,13 +1459,15 @@ static ssize_t video_proc_write(struct file *file, const char __user *buf,
        ret = get_video_status(dev, &video_out);
        if (!ret) {
                unsigned int new_video_out = video_out;
+
                if (lcd_out != -1)
                        _set_bit(&new_video_out, HCI_VIDEO_OUT_LCD, lcd_out);
                if (crt_out != -1)
                        _set_bit(&new_video_out, HCI_VIDEO_OUT_CRT, crt_out);
                if (tv_out != -1)
                        _set_bit(&new_video_out, HCI_VIDEO_OUT_TV, tv_out);
-               /* To avoid unnecessary video disruption, only write the new
+               /*
+                * To avoid unnecessary video disruption, only write the new
                 * video setting if something changed. */
                if (new_video_out != video_out)
                        ret = write_acpi_int(METHOD_VIDEO_OUT, new_video_out);
@@ -1135,10 +1530,10 @@ static ssize_t fan_proc_write(struct file *file, const char __user *buf,
        if (sscanf(cmd, " force_on : %i", &value) == 1 &&
            value >= 0 && value <= 1) {
                hci_result = hci_write1(dev, HCI_FAN, value);
-               if (hci_result != TOS_SUCCESS)
-                       return -EIO;
-               else
+               if (hci_result == TOS_SUCCESS)
                        dev->force_fan = value;
+               else
+                       return -EIO;
        } else {
                return -EINVAL;
        }
@@ -1167,11 +1562,13 @@ static int keys_proc_show(struct seq_file *m, void *v)
                        dev->key_event_valid = 1;
                        dev->last_key_event = value;
                } else if (hci_result == TOS_FIFO_EMPTY) {
-                       /* better luck next time */
+                       /* Better luck next time */
                } else if (hci_result == TOS_NOT_SUPPORTED) {
-                       /* This is a workaround for an unresolved issue on
+                       /*
+                        * This is a workaround for an unresolved issue on
                         * some machines where system events sporadically
-                        * become disabled. */
+                        * become disabled.
+                        */
                        hci_result = hci_write1(dev, HCI_SYSTEM_EVENT, 1);
                        pr_notice("Re-enabled hotkeys\n");
                } else {
@@ -1203,11 +1600,10 @@ static ssize_t keys_proc_write(struct file *file, const char __user *buf,
                return -EFAULT;
        cmd[len] = '\0';
 
-       if (sscanf(cmd, " hotkey_ready : %i", &value) == 1 && value == 0) {
+       if (sscanf(cmd, " hotkey_ready : %i", &value) == 1 && value == 0)
                dev->key_event_valid = 0;
-       } else {
+       else
                return -EINVAL;
-       }
 
        return count;
 }
@@ -1241,7 +1637,8 @@ static const struct file_operations version_proc_fops = {
        .release        = single_release,
 };
 
-/* proc and module init
+/*
+ * Proc and module init
  */
 
 #define PROC_TOSHIBA           "toshiba"
@@ -1286,66 +1683,56 @@ static const struct backlight_ops toshiba_backlight_data = {
 /*
  * Sysfs files
  */
-static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
-                                        struct device_attribute *attr,
-                                        const char *buf, size_t count);
-static ssize_t toshiba_kbd_bl_mode_show(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf);
-static ssize_t toshiba_kbd_type_show(struct device *dev,
-                                    struct device_attribute *attr,
-                                    char *buf);
-static ssize_t toshiba_available_kbd_modes_show(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf);
-static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
-                                           struct device_attribute *attr,
-                                           const char *buf, size_t count);
-static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev,
-                                          struct device_attribute *attr,
-                                          char *buf);
-static ssize_t toshiba_touchpad_store(struct device *dev,
-                                     struct device_attribute *attr,
-                                     const char *buf, size_t count);
-static ssize_t toshiba_touchpad_show(struct device *dev,
-                                    struct device_attribute *attr,
-                                    char *buf);
-static ssize_t toshiba_position_show(struct device *dev,
-                                    struct device_attribute *attr,
-                                    char *buf);
-
-static DEVICE_ATTR(kbd_backlight_mode, S_IRUGO | S_IWUSR,
-                  toshiba_kbd_bl_mode_show, toshiba_kbd_bl_mode_store);
-static DEVICE_ATTR(kbd_type, S_IRUGO, toshiba_kbd_type_show, NULL);
-static DEVICE_ATTR(available_kbd_modes, S_IRUGO,
-                  toshiba_available_kbd_modes_show, NULL);
-static DEVICE_ATTR(kbd_backlight_timeout, S_IRUGO | S_IWUSR,
-                  toshiba_kbd_bl_timeout_show, toshiba_kbd_bl_timeout_store);
-static DEVICE_ATTR(touchpad, S_IRUGO | S_IWUSR,
-                  toshiba_touchpad_show, toshiba_touchpad_store);
-static DEVICE_ATTR(position, S_IRUGO, toshiba_position_show, NULL);
+static ssize_t version_show(struct device *dev,
+                           struct device_attribute *attr, char *buf)
+{
+       return sprintf(buf, "%s\n", TOSHIBA_ACPI_VERSION);
+}
+static DEVICE_ATTR_RO(version);
 
-static struct attribute *toshiba_attributes[] = {
-       &dev_attr_kbd_backlight_mode.attr,
-       &dev_attr_kbd_type.attr,
-       &dev_attr_available_kbd_modes.attr,
-       &dev_attr_kbd_backlight_timeout.attr,
-       &dev_attr_touchpad.attr,
-       &dev_attr_position.attr,
-       NULL,
-};
+static ssize_t fan_store(struct device *dev,
+                        struct device_attribute *attr,
+                        const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 result;
+       int state;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &state);
+       if (ret)
+               return ret;
+
+       if (state != 0 && state != 1)
+               return -EINVAL;
+
+       result = hci_write1(toshiba, HCI_FAN, state);
+       if (result == TOS_FAILURE)
+               return -EIO;
+       else if (result == TOS_NOT_SUPPORTED)
+               return -ENODEV;
+
+       return count;
+}
+
+static ssize_t fan_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 value;
+       int ret;
 
-static umode_t toshiba_sysfs_is_visible(struct kobject *,
-                                       struct attribute *, int);
+       ret = get_fan_status(toshiba, &value);
+       if (ret)
+               return ret;
 
-static struct attribute_group toshiba_attr_group = {
-       .is_visible = toshiba_sysfs_is_visible,
-       .attrs = toshiba_attributes,
-};
+       return sprintf(buf, "%d\n", value);
+}
+static DEVICE_ATTR_RW(fan);
 
-static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
-                                        struct device_attribute *attr,
-                                        const char *buf, size_t count)
+static ssize_t kbd_backlight_mode_store(struct device *dev,
+                                       struct device_attribute *attr,
+                                       const char *buf, size_t count)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
        int mode;
@@ -1369,7 +1756,8 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
                        return -EINVAL;
        }
 
-       /* Set the Keyboard Backlight Mode where:
+       /*
+        * Set the Keyboard Backlight Mode where:
         *      Auto - KBD backlight turns off automatically in given time
         *      FN-Z - KBD backlight "toggles" when hotkey pressed
         *      ON   - KBD backlight is always on
@@ -1400,9 +1788,9 @@ static ssize_t toshiba_kbd_bl_mode_store(struct device *dev,
        return count;
 }
 
-static ssize_t toshiba_kbd_bl_mode_show(struct device *dev,
-                                       struct device_attribute *attr,
-                                       char *buf)
+static ssize_t kbd_backlight_mode_show(struct device *dev,
+                                      struct device_attribute *attr,
+                                      char *buf)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
        u32 time;
@@ -1412,19 +1800,20 @@ static ssize_t toshiba_kbd_bl_mode_show(struct device *dev,
 
        return sprintf(buf, "%i\n", time & SCI_KBD_MODE_MASK);
 }
+static DEVICE_ATTR_RW(kbd_backlight_mode);
 
-static ssize_t toshiba_kbd_type_show(struct device *dev,
-                                    struct device_attribute *attr,
-                                    char *buf)
+static ssize_t kbd_type_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
 
        return sprintf(buf, "%d\n", toshiba->kbd_type);
 }
+static DEVICE_ATTR_RO(kbd_type);
 
-static ssize_t toshiba_available_kbd_modes_show(struct device *dev,
-                                               struct device_attribute *attr,
-                                               char *buf)
+static ssize_t available_kbd_modes_show(struct device *dev,
+                                       struct device_attribute *attr,
+                                       char *buf)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
 
@@ -1435,10 +1824,11 @@ static ssize_t toshiba_available_kbd_modes_show(struct device *dev,
        return sprintf(buf, "%x %x %x\n",
                       SCI_KBD_MODE_AUTO, SCI_KBD_MODE_ON, SCI_KBD_MODE_OFF);
 }
+static DEVICE_ATTR_RO(available_kbd_modes);
 
-static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
-                                           struct device_attribute *attr,
-                                           const char *buf, size_t count)
+static ssize_t kbd_backlight_timeout_store(struct device *dev,
+                                          struct device_attribute *attr,
+                                          const char *buf, size_t count)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
        int time;
@@ -1479,9 +1869,9 @@ static ssize_t toshiba_kbd_bl_timeout_store(struct device *dev,
        return count;
 }
 
-static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev,
-                                          struct device_attribute *attr,
-                                          char *buf)
+static ssize_t kbd_backlight_timeout_show(struct device *dev,
+                                         struct device_attribute *attr,
+                                         char *buf)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
        u32 time;
@@ -1491,10 +1881,11 @@ static ssize_t toshiba_kbd_bl_timeout_show(struct device *dev,
 
        return sprintf(buf, "%i\n", time >> HCI_MISC_SHIFT);
 }
+static DEVICE_ATTR_RW(kbd_backlight_timeout);
 
-static ssize_t toshiba_touchpad_store(struct device *dev,
-                                     struct device_attribute *attr,
-                                     const char *buf, size_t count)
+static ssize_t touchpad_store(struct device *dev,
+                             struct device_attribute *attr,
+                             const char *buf, size_t count)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
        int state;
@@ -1514,8 +1905,8 @@ static ssize_t toshiba_touchpad_store(struct device *dev,
        return count;
 }
 
-static ssize_t toshiba_touchpad_show(struct device *dev,
-                                    struct device_attribute *attr, char *buf)
+static ssize_t touchpad_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
        u32 state;
@@ -1527,9 +1918,10 @@ static ssize_t toshiba_touchpad_show(struct device *dev,
 
        return sprintf(buf, "%i\n", state);
 }
+static DEVICE_ATTR_RW(touchpad);
 
-static ssize_t toshiba_position_show(struct device *dev,
-                                    struct device_attribute *attr, char *buf)
+static ssize_t position_show(struct device *dev,
+                            struct device_attribute *attr, char *buf)
 {
        struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
        u32 xyval, zval, tmp;
@@ -1548,6 +1940,336 @@ static ssize_t toshiba_position_show(struct device *dev,
 
        return sprintf(buf, "%d %d %d\n", x, y, z);
 }
+static DEVICE_ATTR_RO(position);
+
+static ssize_t usb_sleep_charge_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 mode;
+       int ret;
+
+       ret = toshiba_usb_sleep_charge_get(toshiba, &mode);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%x\n", mode & SCI_USB_CHARGE_MODE_MASK);
+}
+
+static ssize_t usb_sleep_charge_store(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 mode;
+       int state;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &state);
+       if (ret)
+               return ret;
+       /*
+        * Check for supported values, where:
+        * 0 - Disabled
+        * 1 - Alternate (Non USB conformant devices that require more power)
+        * 2 - Auto (USB conformant devices)
+        */
+       if (state != 0 && state != 1 && state != 2)
+               return -EINVAL;
+
+       /* Set the USB charging mode to internal value */
+       if (state == 0)
+               mode = SCI_USB_CHARGE_DISABLED;
+       else if (state == 1)
+               mode = SCI_USB_CHARGE_ALTERNATE;
+       else if (state == 2)
+               mode = SCI_USB_CHARGE_AUTO;
+
+       ret = toshiba_usb_sleep_charge_set(toshiba, mode);
+       if (ret)
+               return ret;
+
+       return count;
+}
+static DEVICE_ATTR_RW(usb_sleep_charge);
+
+static ssize_t sleep_functions_on_battery_show(struct device *dev,
+                                              struct device_attribute *attr,
+                                              char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 state;
+       int bat_lvl;
+       int status;
+       int ret;
+       int tmp;
+
+       ret = toshiba_sleep_functions_status_get(toshiba, &state);
+       if (ret < 0)
+               return ret;
+
+       /* Determine the status: 0x4 - Enabled | 0x1 - Disabled */
+       tmp = state & SCI_USB_CHARGE_BAT_MASK;
+       status = (tmp == 0x4) ? 1 : 0;
+       /* Determine the battery level set */
+       bat_lvl = state >> HCI_MISC_SHIFT;
+
+       return sprintf(buf, "%d %d\n", status, bat_lvl);
+}
+
+static ssize_t sleep_functions_on_battery_store(struct device *dev,
+                                               struct device_attribute *attr,
+                                               const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 status;
+       int value;
+       int ret;
+       int tmp;
+
+       ret = kstrtoint(buf, 0, &value);
+       if (ret)
+               return ret;
+
+       /*
+        * Set the status of the function:
+        * 0 - Disabled
+        * 1-100 - Enabled
+        */
+       if (value < 0 || value > 100)
+               return -EINVAL;
+
+       if (value == 0) {
+               tmp = toshiba->usbsc_bat_level << HCI_MISC_SHIFT;
+               status = tmp | SCI_USB_CHARGE_BAT_LVL_OFF;
+       } else {
+               tmp = value << HCI_MISC_SHIFT;
+               status = tmp | SCI_USB_CHARGE_BAT_LVL_ON;
+       }
+       ret = toshiba_sleep_functions_status_set(toshiba, status);
+       if (ret < 0)
+               return ret;
+
+       toshiba->usbsc_bat_level = status >> HCI_MISC_SHIFT;
+
+       return count;
+}
+static DEVICE_ATTR_RW(sleep_functions_on_battery);
+
+static ssize_t usb_rapid_charge_show(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 state;
+       int ret;
+
+       ret = toshiba_usb_rapid_charge_get(toshiba, &state);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t usb_rapid_charge_store(struct device *dev,
+                                     struct device_attribute *attr,
+                                     const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int state;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &state);
+       if (ret)
+               return ret;
+       if (state != 0 && state != 1)
+               return -EINVAL;
+
+       ret = toshiba_usb_rapid_charge_set(toshiba, state);
+       if (ret)
+               return ret;
+
+       return count;
+}
+static DEVICE_ATTR_RW(usb_rapid_charge);
+
+static ssize_t usb_sleep_music_show(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 state;
+       int ret;
+
+       ret = toshiba_usb_sleep_music_get(toshiba, &state);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t usb_sleep_music_store(struct device *dev,
+                                    struct device_attribute *attr,
+                                    const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int state;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &state);
+       if (ret)
+               return ret;
+       if (state != 0 && state != 1)
+               return -EINVAL;
+
+       ret = toshiba_usb_sleep_music_set(toshiba, state);
+       if (ret)
+               return ret;
+
+       return count;
+}
+static DEVICE_ATTR_RW(usb_sleep_music);
+
+static ssize_t kbd_function_keys_show(struct device *dev,
+                                     struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int mode;
+       int ret;
+
+       ret = toshiba_function_keys_get(toshiba, &mode);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%d\n", mode);
+}
+
+static ssize_t kbd_function_keys_store(struct device *dev,
+                                      struct device_attribute *attr,
+                                      const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int mode;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &mode);
+       if (ret)
+               return ret;
+       /*
+        * Check for the function keys mode where:
+        * 0 - Normal operation (F{1-12} as usual and hotkeys via FN-F{1-12})
+        * 1 - Special functions (Opposite of the above setting)
+        */
+       if (mode != 0 && mode != 1)
+               return -EINVAL;
+
+       ret = toshiba_function_keys_set(toshiba, mode);
+       if (ret)
+               return ret;
+
+       pr_info("Reboot for changes to KBD Function Keys to take effect\n");
+
+       return count;
+}
+static DEVICE_ATTR_RW(kbd_function_keys);
+
+static ssize_t panel_power_on_show(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 state;
+       int ret;
+
+       ret = toshiba_panel_power_on_get(toshiba, &state);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t panel_power_on_store(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int state;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &state);
+       if (ret)
+               return ret;
+       if (state != 0 && state != 1)
+               return -EINVAL;
+
+       ret = toshiba_panel_power_on_set(toshiba, state);
+       if (ret)
+               return ret;
+
+       pr_info("Reboot for changes to Panel Power ON to take effect\n");
+
+       return count;
+}
+static DEVICE_ATTR_RW(panel_power_on);
+
+static ssize_t usb_three_show(struct device *dev,
+                             struct device_attribute *attr, char *buf)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       u32 state;
+       int ret;
+
+       ret = toshiba_usb_three_get(toshiba, &state);
+       if (ret < 0)
+               return ret;
+
+       return sprintf(buf, "%d\n", state);
+}
+
+static ssize_t usb_three_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t count)
+{
+       struct toshiba_acpi_dev *toshiba = dev_get_drvdata(dev);
+       int state;
+       int ret;
+
+       ret = kstrtoint(buf, 0, &state);
+       if (ret)
+               return ret;
+       /*
+        * Check for USB 3 mode where:
+        * 0 - Disabled (Acts like a USB 2 port, saving power)
+        * 1 - Enabled
+        */
+       if (state != 0 && state != 1)
+               return -EINVAL;
+
+       ret = toshiba_usb_three_set(toshiba, state);
+       if (ret)
+               return ret;
+
+       pr_info("Reboot for changes to USB 3 to take effect\n");
+
+       return count;
+}
+static DEVICE_ATTR_RW(usb_three);
+
+static struct attribute *toshiba_attributes[] = {
+       &dev_attr_version.attr,
+       &dev_attr_fan.attr,
+       &dev_attr_kbd_backlight_mode.attr,
+       &dev_attr_kbd_type.attr,
+       &dev_attr_available_kbd_modes.attr,
+       &dev_attr_kbd_backlight_timeout.attr,
+       &dev_attr_touchpad.attr,
+       &dev_attr_position.attr,
+       &dev_attr_usb_sleep_charge.attr,
+       &dev_attr_sleep_functions_on_battery.attr,
+       &dev_attr_usb_rapid_charge.attr,
+       &dev_attr_usb_sleep_music.attr,
+       &dev_attr_kbd_function_keys.attr,
+       &dev_attr_panel_power_on.attr,
+       &dev_attr_usb_three.attr,
+       NULL,
+};
 
 static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
                                        struct attribute *attr, int idx)
@@ -1556,7 +2278,9 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
        struct toshiba_acpi_dev *drv = dev_get_drvdata(dev);
        bool exists = true;
 
-       if (attr == &dev_attr_kbd_backlight_mode.attr)
+       if (attr == &dev_attr_fan.attr)
+               exists = (drv->fan_supported) ? true : false;
+       else if (attr == &dev_attr_kbd_backlight_mode.attr)
                exists = (drv->kbd_illum_supported) ? true : false;
        else if (attr == &dev_attr_kbd_backlight_timeout.attr)
                exists = (drv->kbd_mode == SCI_KBD_MODE_AUTO) ? true : false;
@@ -1564,10 +2288,29 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj,
                exists = (drv->touchpad_supported) ? true : false;
        else if (attr == &dev_attr_position.attr)
                exists = (drv->accelerometer_supported) ? true : false;
+       else if (attr == &dev_attr_usb_sleep_charge.attr)
+               exists = (drv->usb_sleep_charge_supported) ? true : false;
+       else if (attr == &dev_attr_sleep_functions_on_battery.attr)
+               exists = (drv->usb_sleep_charge_supported) ? true : false;
+       else if (attr == &dev_attr_usb_rapid_charge.attr)
+               exists = (drv->usb_rapid_charge_supported) ? true : false;
+       else if (attr == &dev_attr_usb_sleep_music.attr)
+               exists = (drv->usb_sleep_music_supported) ? true : false;
+       else if (attr == &dev_attr_kbd_function_keys.attr)
+               exists = (drv->kbd_function_keys_supported) ? true : false;
+       else if (attr == &dev_attr_panel_power_on.attr)
+               exists = (drv->panel_power_on_supported) ? true : false;
+       else if (attr == &dev_attr_usb_three.attr)
+               exists = (drv->usb_three_supported) ? true : false;
 
        return exists ? attr->mode : 0;
 }
 
+static struct attribute_group toshiba_attr_group = {
+       .is_visible = toshiba_sysfs_is_visible,
+       .attrs = toshiba_attributes,
+};
+
 /*
  * Hotkeys
  */
@@ -1644,7 +2387,7 @@ static void toshiba_acpi_report_hotkey(struct toshiba_acpi_dev *dev,
        if (scancode == 0x100)
                return;
 
-       /* act on key press; ignore key release */
+       /* Act on key press; ignore key release */
        if (scancode & 0x80)
                return;
 
@@ -1680,7 +2423,7 @@ static void toshiba_acpi_process_hotkeys(struct toshiba_acpi_dev *dev)
                                hci_result =
                                        hci_write1(dev, HCI_SYSTEM_EVENT, 1);
                                pr_notice("Re-enabled hotkeys\n");
-                               /* fall through */
+                               /* Fall through */
                        default:
                                retries--;
                                break;
@@ -1802,7 +2545,7 @@ static int toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
        props.type = BACKLIGHT_PLATFORM;
        props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
 
-       /* adding an extra level and having 0 change to transflective mode */
+       /* Adding an extra level and having 0 change to transflective mode */
        if (dev->tr_backlight_supported)
                props.max_brightness++;
 
@@ -1973,6 +2716,24 @@ static int toshiba_acpi_add(struct acpi_device *acpi_dev)
        ret = toshiba_accelerometer_supported(dev);
        dev->accelerometer_supported = !ret;
 
+       ret = toshiba_usb_sleep_charge_get(dev, &dummy);
+       dev->usb_sleep_charge_supported = !ret;
+
+       ret = toshiba_usb_rapid_charge_get(dev, &dummy);
+       dev->usb_rapid_charge_supported = !ret;
+
+       ret = toshiba_usb_sleep_music_get(dev, &dummy);
+       dev->usb_sleep_music_supported = !ret;
+
+       ret = toshiba_function_keys_get(dev, &dummy);
+       dev->kbd_function_keys_supported = !ret;
+
+       ret = toshiba_panel_power_on_get(dev, &dummy);
+       dev->panel_power_on_supported = !ret;
+
+       ret = toshiba_usb_three_get(dev, &dummy);
+       dev->usb_three_supported = !ret;
+
        /* Determine whether or not BIOS supports fan and video interfaces */
 
        ret = get_video_status(dev, &dummy);
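
The new Sleep and Charge, Rapid Charge, Sleep and Music, function key, panel power and USB 3 accessors added above all follow one shape: open the SCI interface, perform a single read or write, close it again, and map the TOS_* status codes onto errno values. A minimal sketch of the get-side pattern follows; toshiba_sci_get_u32() is a hypothetical name used only for illustration, while sci_open(), sci_read(), sci_close() and the TOS_* codes are the driver's own helpers.

/* Sketch only: the common accessor pattern, not part of the patch. */
static int toshiba_sci_get_u32(struct toshiba_acpi_dev *dev, u32 reg, u32 *out)
{
	u32 result;

	if (!sci_open(dev))		/* open the SCI interface */
		return -EIO;

	result = sci_read(dev, reg, out);	/* one register read */
	sci_close(dev);

	if (result == TOS_FAILURE || result == TOS_INPUT_DATA_ERROR)
		return -EIO;		/* firmware rejected the call */
	if (result == TOS_NOT_SUPPORTED)
		return -ENODEV;		/* feature absent on this model */

	return 0;
}
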
index 782e82289571b219ee0faa695c9d17cefb832c69..f980ff7166e98e93546f7f79657087983a5388ce 100644 (file)
@@ -179,8 +179,9 @@ int pnp_check_port(struct pnp_dev *dev, struct resource *res)
        /* check if the resource is already in use, skip if the
         * device is active because it itself may be in use */
        if (!dev->active) {
-               if (__check_region(&ioport_resource, *port, length(port, end)))
+               if (!request_region(*port, length(port, end), "pnp"))
                        return 0;
+               release_region(*port, length(port, end));
        }
 
        /* check if the resource is reserved */
@@ -241,8 +242,9 @@ int pnp_check_mem(struct pnp_dev *dev, struct resource *res)
        /* check if the resource is already in use, skip if the
         * device is active because it itself may be in use */
        if (!dev->active) {
-               if (check_mem_region(*addr, length(addr, end)))
+               if (!request_mem_region(*addr, length(addr, end), "pnp"))
                        return 0;
+               release_mem_region(*addr, length(addr, end));
        }
 
        /* check if the resource is reserved */
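
In both hunks above, the old __check_region()/check_mem_region() availability tests are replaced by a claim-and-release probe: request_region() and request_mem_region() atomically reserve the range, so a successful claim proves the range was free, and the immediate release keeps the probe free of side effects. A minimal sketch of the idiom, using a hypothetical helper name purely for illustration:

/* Sketch only: probe-by-claim for an I/O port range, as in pnp_check_port(). */
static bool pnp_port_range_free(resource_size_t start, resource_size_t len)
{
	if (!request_region(start, len, "pnp probe"))
		return false;		/* already owned by another driver */
	release_region(start, len);	/* only probing, hand it back */
	return true;
}
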
index cedb41c95daed56ebc622e6350eeb60d5ac3bda0..b5b5c3d485d637122ad21605ff56948d98f7af6a 100644 (file)
@@ -65,7 +65,7 @@ config RTC_DEBUG
 comment "RTC interfaces"
 
 config RTC_INTF_SYSFS
-       boolean "/sys/class/rtc/rtcN (sysfs)"
+       bool "/sys/class/rtc/rtcN (sysfs)"
        depends on SYSFS
        default RTC_CLASS
        help
@@ -75,7 +75,7 @@ config RTC_INTF_SYSFS
          If unsure, say Y.
 
 config RTC_INTF_PROC
-       boolean "/proc/driver/rtc (procfs for rtcN)"
+       bool "/proc/driver/rtc (procfs for rtcN)"
        depends on PROC_FS
        default RTC_CLASS
        help
@@ -88,7 +88,7 @@ config RTC_INTF_PROC
          If unsure, say Y.
 
 config RTC_INTF_DEV
-       boolean "/dev/rtcN (character devices)"
+       bool "/dev/rtcN (character devices)"
        default RTC_CLASS
        help
          Say yes here if you want to use your RTCs using the /dev
@@ -466,7 +466,7 @@ config RTC_DRV_DM355EVM
          Supports the RTC firmware in the MSP430 on the DM355 EVM.
 
 config RTC_DRV_TWL92330
-       boolean "TI TWL92330/Menelaus"
+       bool "TI TWL92330/Menelaus"
        depends on MENELAUS
        help
          If you say yes here you get support for the RTC on the
index aa3e2c7cd83c8a5275b900f37b23912884a789c7..a6f5ee80fadc5f3e8d462fc43b615a1134d509f6 100644 (file)
@@ -178,12 +178,6 @@ static void pci_esp_dma_drain(struct esp *esp)
                        break;
                cpu_relax();
        }
-       if (resid > 1) {
-               /* FIFO not cleared */
-               shost_printk(KERN_INFO, esp->host,
-                            "FIFO not cleared, %d bytes left\n",
-                            resid);
-       }
 
        /*
         * When there is a residual BCMPLT will never be set
index 96241b20fd2c8b690e57139e8e404a522dcb2392..a7cc618378187fb7d38e504d51befb93e78b223c 100644 (file)
@@ -585,7 +585,6 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
                        "beiscsi_hba_alloc - iscsi_host_alloc failed\n");
                return NULL;
        }
-       shost->dma_boundary = pcidev->dma_mask;
        shost->max_id = BE2_MAX_SESSIONS;
        shost->max_channel = 0;
        shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
index 95d581c45413fb38dd778e2b4695e7f7159c9c0b..a1cfbd3dda4713d05f254b0a9ef33131f6c12c0b 100644 (file)
@@ -6831,10 +6831,8 @@ static struct workqueue_struct *hpsa_create_controller_wq(struct ctlr_info *h,
                                                char *name)
 {
        struct workqueue_struct *wq = NULL;
-       char wq_name[20];
 
-       snprintf(wq_name, sizeof(wq_name), "%s_%d_hpsa", name, h->ctlr);
-       wq = alloc_ordered_workqueue(wq_name, 0);
+       wq = alloc_ordered_workqueue("%s_%d_hpsa", 0, name, h->ctlr);
        if (!wq)
                dev_err(&h->pdev->dev, "failed to create %s workqueue\n", name);
 
index 73f9feecda72b71552b63eb28b7bec9106f6fd4e..99f43b7fc9ab74256d6f22f17e3c6d75c6e5fcdb 100644 (file)
@@ -1570,9 +1570,7 @@ static int tcm_qla2xxx_check_initiator_node_acl(
         * match the format by tcm_qla2xxx explict ConfigFS NodeACLs.
         */
        memset(&port_name, 0, 36);
-       snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
-               fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
-               fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
+       snprintf(port_name, sizeof(port_name), "%8phC", fc_wwpn);
        /*
         * Locate our struct se_node_acl either from an explict NodeACL created
         * via ConfigFS, or via running in TPG demo mode.
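
The hunk above swaps the open-coded eight "%02x" conversions for the kernel's %*ph printf extension: "%8phC" consumes a pointer argument and prints eight bytes as colon-separated hex, which is exactly the WWPN format previously built by hand. A small illustrative sketch (the function name and buffer size are hypothetical):

/* Sketch only: "%8phC" renders 8 bytes as colon-separated hex. */
static void format_wwpn(const u8 wwpn[8], char name[36])
{
	snprintf(name, 36, "%8phC", wwpn);	/* e.g. "21:00:00:24:ff:31:2a:50" */
}
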
index 0cbc1fb45f10eb90ac74b1a20a2c755c4dc6a3cb..2270bd51f9c2c240c669e562eb77052f89425a83 100644 (file)
@@ -546,7 +546,7 @@ static ssize_t
 sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
 {
        sg_io_hdr_t *hp = &srp->header;
-       int err = 0;
+       int err = 0, err2;
        int len;
 
        if (count < SZ_SG_IO_HDR) {
@@ -575,8 +575,8 @@ sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp)
                goto err_out;
        }
 err_out:
-       err = sg_finish_rem_req(srp);
-       return (0 == err) ? count : err;
+       err2 = sg_finish_rem_req(srp);
+       return err ? : err2 ? : count;
 }
 
 static ssize_t
@@ -1335,6 +1335,17 @@ sg_rq_end_io(struct request *rq, int uptodate)
        }
        /* Rely on write phase to clean out srp status values, so no "else" */
 
+       /*
+        * Free the request as soon as it is complete so that its resources
+        * can be reused without waiting for userspace to read() the
+        * result.  But keep the associated bio (if any) around until
+        * blk_rq_unmap_user() can be called from user context.
+        */
+       srp->rq = NULL;
+       if (rq->cmd != rq->__cmd)
+               kfree(rq->cmd);
+       __blk_put_request(rq->q, rq);
+
        write_lock_irqsave(&sfp->rq_list_lock, iflags);
        if (unlikely(srp->orphan)) {
                if (sfp->keep_orphan)
@@ -1669,7 +1680,22 @@ sg_start_req(Sg_request *srp, unsigned char *cmd)
                        return -ENOMEM;
        }
 
-       rq = blk_get_request(q, rw, GFP_ATOMIC);
+       /*
+        * NOTE
+        *
+        * With scsi-mq enabled, there are a fixed number of preallocated
+        * requests equal in number to shost->can_queue.  If all of the
+        * preallocated requests are already in use, then using GFP_ATOMIC with
+        * blk_get_request() will return -EWOULDBLOCK, whereas using GFP_KERNEL
+        * will cause blk_get_request() to sleep until an active command
+        * completes, freeing up a request.  Neither option is ideal, but
+        * GFP_KERNEL is the better choice to prevent userspace from getting an
+        * unexpected EWOULDBLOCK.
+        *
+        * With scsi-mq disabled, blk_get_request() with GFP_KERNEL usually
+        * does not sleep except under memory pressure.
+        */
+       rq = blk_get_request(q, rw, GFP_KERNEL);
        if (IS_ERR(rq)) {
                kfree(long_cmdp);
                return PTR_ERR(rq);
@@ -1759,10 +1785,10 @@ sg_finish_rem_req(Sg_request *srp)
        SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp,
                                      "sg_finish_rem_req: res_used=%d\n",
                                      (int) srp->res_used));
-       if (srp->rq) {
-               if (srp->bio)
-                       ret = blk_rq_unmap_user(srp->bio);
+       if (srp->bio)
+               ret = blk_rq_unmap_user(srp->bio);
 
+       if (srp->rq) {
                if (srp->rq->cmd != srp->rq->__cmd)
                        kfree(srp->rq->cmd);
                blk_put_request(srp->rq);
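
The sg_new_read() hunk above returns "err ? : err2 ? : count", which relies on the GCC conditional-with-omitted-middle-operand extension: "a ? : b" evaluates to a when a is non-zero and to b otherwise, so the first non-zero error code wins and the byte count is returned only when both the read and sg_finish_rem_req() succeed. Written out long-hand in a hypothetical helper, purely for illustration:

/* Sketch only: equivalent of "return err ? : err2 ? : count;". */
static ssize_t sg_new_read_result(int err, int err2, size_t count)
{
	if (err)
		return err;	/* error while copying or validating the reply */
	if (err2)
		return err2;	/* error from sg_finish_rem_req() */
	return count;
}
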
index 7702664d7ed3257e1670ee7d97ee19cadb1dc7cc..289ad016d92504e9d4ebbc47cf8d244ab6cb68b5 100644 (file)
@@ -870,6 +870,7 @@ fail_free_params:
 }
 
 static struct scsi_host_template wd719x_template = {
+       .module                         = THIS_MODULE,
        .name                           = "Western Digital 719x",
        .queuecommand                   = wd719x_queuecommand,
        .eh_abort_handler               = wd719x_abort,
index 95ccedabba4f9dca37dbd4909e3bc578ec619cd3..ab8dfbef6f1bb681a9ff14ca1e7ca09749b6a199 100644 (file)
@@ -29,7 +29,7 @@ menuconfig SPI
 if SPI
 
 config SPI_DEBUG
-       boolean "Debug support for SPI drivers"
+       bool "Debug support for SPI drivers"
        depends on DEBUG_KERNEL
        help
          Say "yes" to enable debug messaging (like dev_dbg and pr_debug),
@@ -40,8 +40,8 @@ config SPI_DEBUG
 #
 
 config SPI_MASTER
-#      boolean "SPI Master Support"
-       boolean
+#      bool "SPI Master Support"
+       bool
        default SPI
        help
          If your system has an master-capable SPI controller (which
index 7eda0b8b7aab36e187fb03b539cb597276d3da43..0a89ad16371f7ded5824b2da96aa971260f27081 100644 (file)
@@ -1,5 +1,5 @@
 config STAGING_BOARD
-       boolean "Staging Board Support"
+       bool "Staging Board Support"
        depends on OF_ADDRESS
        depends on BROKEN
        help
index 9bc6d3db86d9d353fa0b05dcb528df2a869da218..cc34020204874acb0156a7c7c4f82fe55e4c13f8 100644 (file)
@@ -1,5 +1,5 @@
 config USB_EMXX
-       boolean "EMXX USB Function Device Controller"
+       bool "EMXX USB Function Device Controller"
        depends on USB_GADGET && (ARCH_SHMOBILE || (ARM && COMPILE_TEST))
        help
           The Emma Mobile series of SoCs from Renesas Electronics and
index fa38be0982f99e55f74532d73a0171e5d0005b03..24183028bd712b11af46cd4531f60b33b4e57338 100644 (file)
@@ -30,13 +30,13 @@ config IIO_SIMPLE_DUMMY
 if IIO_SIMPLE_DUMMY
 
 config IIO_SIMPLE_DUMMY_EVENTS
-       boolean "Event generation support"
+       bool "Event generation support"
        select IIO_DUMMY_EVGEN
        help
          Add some dummy events to the simple dummy driver.
 
 config IIO_SIMPLE_DUMMY_BUFFER
-       boolean "Buffered capture support"
+       bool "Buffered capture support"
        select IIO_BUFFER
        select IIO_KFIFO_BUF
        help
index 88614b71cf6d2886950a890ca64e25778ecae602..ddf1fa9f67f8f80fbb8237260a0b4bef3a74b0d4 100644 (file)
@@ -270,7 +270,7 @@ void ll_invalidate_aliases(struct inode *inode)
 
 int ll_revalidate_it_finish(struct ptlrpc_request *request,
                            struct lookup_intent *it,
-                           struct dentry *de)
+                           struct inode *inode)
 {
        int rc = 0;
 
@@ -280,19 +280,17 @@ int ll_revalidate_it_finish(struct ptlrpc_request *request,
        if (it_disposition(it, DISP_LOOKUP_NEG))
                return -ENOENT;
 
-       rc = ll_prep_inode(&de->d_inode, request, NULL, it);
+       rc = ll_prep_inode(&inode, request, NULL, it);
 
        return rc;
 }
 
-void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry)
+void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
 {
        LASSERT(it != NULL);
-       LASSERT(dentry != NULL);
 
-       if (it->d.lustre.it_lock_mode && dentry->d_inode != NULL) {
-               struct inode *inode = dentry->d_inode;
-               struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
+       if (it->d.lustre.it_lock_mode && inode != NULL) {
+               struct ll_sb_info *sbi = ll_i2sbi(inode);
 
                CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
                       inode, inode->i_ino, inode->i_generation);
index 7c7ef7ec908e48467207144b682291a607d89514..5ebee6ca0a108330711cd02aefea5f87eaccd771 100644 (file)
@@ -2912,8 +2912,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
                        oit.it_op = IT_LOOKUP;
 
                /* Call getattr by fid, so do not provide name at all. */
-               op_data = ll_prep_md_op_data(NULL, dentry->d_inode,
-                                            dentry->d_inode, NULL, 0, 0,
+               op_data = ll_prep_md_op_data(NULL, inode,
+                                            inode, NULL, 0, 0,
                                             LUSTRE_OPC_ANY, NULL);
                if (IS_ERR(op_data))
                        return PTR_ERR(op_data);
@@ -2931,7 +2931,7 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
                        goto out;
                }
 
-               rc = ll_revalidate_it_finish(req, &oit, dentry);
+               rc = ll_revalidate_it_finish(req, &oit, inode);
                if (rc != 0) {
                        ll_intent_release(&oit);
                        goto out;
@@ -2944,7 +2944,7 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
                if (!dentry->d_inode->i_nlink)
                        d_lustre_invalidate(dentry, 0);
 
-               ll_lookup_finish_locks(&oit, dentry);
+               ll_lookup_finish_locks(&oit, inode);
        } else if (!ll_have_md_lock(dentry->d_inode, &ibits, LCK_MINMODE)) {
                struct ll_sb_info *sbi = ll_i2sbi(dentry->d_inode);
                u64 valid = OBD_MD_FLGETATTR;
index d032c2b086ccc535ccd488f1410a251b8811601d..2af1d7286250a32097ac89d1735e08ae3c97ebad 100644 (file)
@@ -786,9 +786,9 @@ extern const struct dentry_operations ll_d_ops;
 void ll_intent_drop_lock(struct lookup_intent *);
 void ll_intent_release(struct lookup_intent *);
 void ll_invalidate_aliases(struct inode *);
-void ll_lookup_finish_locks(struct lookup_intent *it, struct dentry *dentry);
+void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode);
 int ll_revalidate_it_finish(struct ptlrpc_request *request,
-                           struct lookup_intent *it, struct dentry *de);
+                           struct lookup_intent *it, struct inode *inode);
 
 /* llite/llite_lib.c */
 extern struct super_operations lustre_super_operations;
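
Throughout these llite hunks the intent helpers stop taking a dentry and work on the inode directly, so callers dereference d_inode exactly once and pass the result down. A rough sketch of the resulting call shape (the wrapper name is illustrative; error handling abridged):

static int revalidate_sketch(struct dentry *dentry, struct lookup_intent *it,
                             struct ptlrpc_request *req)
{
        struct inode *inode = dentry->d_inode;  /* single deref at the caller */
        int rc;

        rc = ll_revalidate_it_finish(req, it, inode);
        if (rc != 0)
                return rc;

        ll_lookup_finish_locks(it, inode);
        return 0;
}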
index 4f361b77c749a718621c8767ec97663e43a7c87f..890ac190f5faf3300ab8461f08cfe63182078c27 100644 (file)
@@ -481,6 +481,7 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
        struct lookup_intent lookup_it = { .it_op = IT_LOOKUP };
        struct dentry *save = dentry, *retval;
        struct ptlrpc_request *req = NULL;
+       struct inode *inode;
        struct md_op_data *op_data;
        __u32 opc;
        int rc;
@@ -539,12 +540,13 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
                goto out;
        }
 
-       if ((it->it_op & IT_OPEN) && dentry->d_inode &&
-           !S_ISREG(dentry->d_inode->i_mode) &&
-           !S_ISDIR(dentry->d_inode->i_mode)) {
-               ll_release_openhandle(dentry->d_inode, it);
+       inode = dentry->d_inode;
+       if ((it->it_op & IT_OPEN) && inode &&
+           !S_ISREG(inode->i_mode) &&
+           !S_ISDIR(inode->i_mode)) {
+               ll_release_openhandle(inode, it);
        }
-       ll_lookup_finish_locks(it, dentry);
+       ll_lookup_finish_locks(it, inode);
 
        if (dentry == save)
                retval = NULL;
index aebde3289c50de6722062dfdea21fa1c549090cd..50bad55a0c42e3bd9eef6925520cb1e9bddf2209 100644 (file)
@@ -30,7 +30,7 @@
 #include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_tq.h"
@@ -45,7 +45,7 @@
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
 #include "iscsi_target_device.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 
 #include <target/iscsi/iscsi_transport.h>
 
@@ -968,11 +968,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
        conn->sess->init_task_tag = cmd->init_task_tag = hdr->itt;
        if (hdr->flags & ISCSI_FLAG_CMD_READ) {
-               spin_lock_bh(&conn->sess->ttt_lock);
-               cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-               if (cmd->targ_xfer_tag == 0xFFFFFFFF)
-                       cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-               spin_unlock_bh(&conn->sess->ttt_lock);
+               cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
        } else if (hdr->flags & ISCSI_FLAG_CMD_WRITE)
                cmd->targ_xfer_tag = 0xFFFFFFFF;
        cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
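
The open-coded block removed here (and again in iscsit_send_r2t() below) is folded into session_get_next_ttt(). Judging from the code it replaces, the helper presumably amounts to the following sketch: hand out the next Target Transfer Tag under ttt_lock while skipping the reserved 0xFFFFFFFF value.

/* Reconstructed from the removed block; the real helper is expected to live
 * in the relocated <target/iscsi/iscsi_target_core.h>. */
static inline u32 session_get_next_ttt_sketch(struct iscsi_session *sess)
{
        u32 ttt;

        spin_lock_bh(&sess->ttt_lock);
        ttt = sess->targ_xfer_tag++;
        if (ttt == 0xFFFFFFFF)
                ttt = sess->targ_xfer_tag++;
        spin_unlock_bh(&sess->ttt_lock);

        return ttt;
}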
@@ -1998,6 +1994,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        cmd->cmd_sn             = be32_to_cpu(hdr->cmdsn);
        cmd->exp_stat_sn        = be32_to_cpu(hdr->exp_statsn);
        cmd->data_direction     = DMA_NONE;
+       cmd->text_in_ptr        = NULL;
 
        return 0;
 }
@@ -2011,9 +2008,13 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        int cmdsn_ret;
 
        if (!text_in) {
-               pr_err("Unable to locate text_in buffer for sendtargets"
-                      " discovery\n");
-               goto reject;
+               cmd->targ_xfer_tag = be32_to_cpu(hdr->ttt);
+               if (cmd->targ_xfer_tag == 0xFFFFFFFF) {
+                       pr_err("Unable to locate text_in buffer for sendtargets"
+                              " discovery\n");
+                       goto reject;
+               }
+               goto empty_sendtargets;
        }
        if (strncmp("SendTargets", text_in, 11) != 0) {
                pr_err("Received Text Data that is not"
@@ -2040,6 +2041,7 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
        spin_unlock_bh(&conn->cmd_lock);
 
+empty_sendtargets:
        iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
 
        if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
@@ -3047,11 +3049,7 @@ static int iscsit_send_r2t(
        int_to_scsilun(cmd->se_cmd.orig_fe_lun,
                        (struct scsi_lun *)&hdr->lun);
        hdr->itt                = cmd->init_task_tag;
-       spin_lock_bh(&conn->sess->ttt_lock);
-       r2t->targ_xfer_tag      = conn->sess->targ_xfer_tag++;
-       if (r2t->targ_xfer_tag == 0xFFFFFFFF)
-               r2t->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-       spin_unlock_bh(&conn->sess->ttt_lock);
+       r2t->targ_xfer_tag      = session_get_next_ttt(conn->sess);
        hdr->ttt                = cpu_to_be32(r2t->targ_xfer_tag);
        hdr->statsn             = cpu_to_be32(conn->stat_sn);
        hdr->exp_cmdsn          = cpu_to_be32(conn->sess->exp_cmd_sn);
@@ -3393,7 +3391,8 @@ static bool iscsit_check_inaddr_any(struct iscsi_np *np)
 
 static int
 iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
-                                 enum iscsit_transport_type network_transport)
+                                 enum iscsit_transport_type network_transport,
+                                 int skip_bytes, bool *completed)
 {
        char *payload = NULL;
        struct iscsi_conn *conn = cmd->conn;
@@ -3405,7 +3404,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
        unsigned char buf[ISCSI_IQN_LEN+12]; /* iqn + "TargetName=" + \0 */
        unsigned char *text_in = cmd->text_in_ptr, *text_ptr = NULL;
 
-       buffer_len = max(conn->conn_ops->MaxRecvDataSegmentLength,
+       buffer_len = min(conn->conn_ops->MaxRecvDataSegmentLength,
                         SENDTARGETS_BUF_LIMIT);
 
        payload = kzalloc(buffer_len, GFP_KERNEL);
@@ -3484,9 +3483,16 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
                                                end_of_buf = 1;
                                                goto eob;
                                        }
-                                       memcpy(payload + payload_len, buf, len);
-                                       payload_len += len;
-                                       target_name_printed = 1;
+
+                                       if (skip_bytes && len <= skip_bytes) {
+                                               skip_bytes -= len;
+                                       } else {
+                                               memcpy(payload + payload_len, buf, len);
+                                               payload_len += len;
+                                               target_name_printed = 1;
+                                               if (len > skip_bytes)
+                                                       skip_bytes = 0;
+                                       }
                                }
 
                                len = sprintf(buf, "TargetAddress="
@@ -3502,15 +3508,24 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
                                        end_of_buf = 1;
                                        goto eob;
                                }
-                               memcpy(payload + payload_len, buf, len);
-                               payload_len += len;
+
+                               if (skip_bytes && len <= skip_bytes) {
+                                       skip_bytes -= len;
+                               } else {
+                                       memcpy(payload + payload_len, buf, len);
+                                       payload_len += len;
+                                       if (len > skip_bytes)
+                                               skip_bytes = 0;
+                               }
                        }
                        spin_unlock(&tpg->tpg_np_lock);
                }
                spin_unlock(&tiqn->tiqn_tpg_lock);
 eob:
-               if (end_of_buf)
+               if (end_of_buf) {
+                       *completed = false;
                        break;
+               }
 
                if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
                        break;
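
Together with the max()→min() fix above (the per-PDU buffer is now the smaller of MaxRecvDataSegmentLength and SENDTARGETS_BUF_LIMIT, not the larger), the skip_bytes/completed pair turns the one-shot SendTargets builder into a paged one: each continuation regenerates the full listing and drops the bytes the initiator already received. A condensed sketch of the per-entry decision, keeping the same whole-entry granularity as the code above (helper name is hypothetical):

#include <linux/errno.h>
#include <linux/string.h>

/* Returns the new payload length, or -E2BIG when the caller should mark
 * end_of_buf and finish the current PDU. */
static int emit_entry_sketch(char *payload, int payload_len, int buffer_len,
                             const char *entry, int len, int *skip_bytes)
{
        if (payload_len + len > buffer_len)
                return -E2BIG;

        if (*skip_bytes && len <= *skip_bytes) {
                *skip_bytes -= len;             /* already sent in an earlier PDU */
                return payload_len;
        }

        memcpy(payload + payload_len, entry, len);
        if (len > *skip_bytes)
                *skip_bytes = 0;
        return payload_len + len;
}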
@@ -3528,13 +3543,23 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
                      enum iscsit_transport_type network_transport)
 {
        int text_length, padding;
+       bool completed = true;
 
-       text_length = iscsit_build_sendtargets_response(cmd, network_transport);
+       text_length = iscsit_build_sendtargets_response(cmd, network_transport,
+                                                       cmd->read_data_done,
+                                                       &completed);
        if (text_length < 0)
                return text_length;
 
+       if (completed) {
+               hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+       } else {
+               hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
+               cmd->read_data_done += text_length;
+               if (cmd->targ_xfer_tag == 0xFFFFFFFF)
+                       cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
+       }
        hdr->opcode = ISCSI_OP_TEXT_RSP;
-       hdr->flags |= ISCSI_FLAG_CMD_FINAL;
        padding = ((-text_length) & 3);
        hton24(hdr->dlength, text_length);
        hdr->itt = cmd->init_task_tag;
@@ -3543,21 +3568,25 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
        hdr->statsn = cpu_to_be32(cmd->stat_sn);
 
        iscsit_increment_maxcmdsn(cmd, conn->sess);
+       /*
+        * Reset maxcmdsn_inc in multi-part text payload exchanges to
+        * correctly increment MaxCmdSN for each response answering a
+        * non immediate text request with a valid CmdSN.
+        */
+       cmd->maxcmdsn_inc = 0;
        hdr->exp_cmdsn = cpu_to_be32(conn->sess->exp_cmd_sn);
        hdr->max_cmdsn = cpu_to_be32(conn->sess->max_cmd_sn);
 
-       pr_debug("Built Text Response: ITT: 0x%08x, StatSN: 0x%08x,"
-               " Length: %u, CID: %hu\n", cmd->init_task_tag, cmd->stat_sn,
-               text_length, conn->cid);
+       pr_debug("Built Text Response: ITT: 0x%08x, TTT: 0x%08x, StatSN: 0x%08x,"
+               " Length: %u, CID: %hu F: %d C: %d\n", cmd->init_task_tag,
+               cmd->targ_xfer_tag, cmd->stat_sn, text_length, conn->cid,
+               !!(hdr->flags & ISCSI_FLAG_CMD_FINAL),
+               !!(hdr->flags & ISCSI_FLAG_TEXT_CONTINUE));
 
        return text_length + padding;
 }
 EXPORT_SYMBOL(iscsit_build_text_rsp);
 
-/*
- *     FIXME: Add support for F_BIT and C_BIT when the length is longer than
- *     MaxRecvDataSegmentLength.
- */
 static int iscsit_send_text_rsp(
        struct iscsi_cmd *cmd,
        struct iscsi_conn *conn)
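
This is what retires the FIXME deleted just above: a fully emitted listing carries the Final (F) bit, while a truncated one carries the Continue (C) bit, advances read_data_done by the bytes just queued and pins a real TTT so the initiator can resume the exchange. In RFC 3720 terms the flag choice reduces to a sketch like this:

#include <scsi/iscsi_proto.h>

static void set_text_rsp_flags_sketch(struct iscsi_text_rsp *hdr, bool completed)
{
        if (completed)
                hdr->flags |= ISCSI_FLAG_CMD_FINAL;      /* last (or only) PDU */
        else
                hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;  /* more data follows */
}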
@@ -4021,9 +4050,15 @@ static int iscsi_target_rx_opcode(struct iscsi_conn *conn, unsigned char *buf)
                ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
                break;
        case ISCSI_OP_TEXT:
-               cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
-               if (!cmd)
-                       goto reject;
+               if (hdr->ttt != cpu_to_be32(0xFFFFFFFF)) {
+                       cmd = iscsit_find_cmd_from_itt(conn, hdr->itt);
+                       if (!cmd)
+                               goto reject;
+               } else {
+                       cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
+                       if (!cmd)
+                               goto reject;
+               }
 
                ret = iscsit_handle_text_cmd(conn, cmd, buf);
                break;
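
On the receive path, a Text PDU that carries anything other than the reserved TTT is the initiator asking for the next chunk of an in-progress SendTargets response, so the original command is looked up by ITT rather than allocated afresh. The convention boils down to this small sketch (helper name is illustrative):

#include <scsi/iscsi_proto.h>

/* The reserved TTT marks a brand-new TEXT exchange; any other value
 * continues a multi-PDU response started earlier. */
static bool text_pdu_is_continuation(const struct iscsi_text *hdr)
{
        return hdr->ttt != cpu_to_be32(0xFFFFFFFF);
}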
index ab4915c0d933a07021b076cf15232b85dc9dcf08..47e249dccb5fe7d9652bea77bddc00b35dd98429 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/err.h>
 #include <linux/scatterlist.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_nego.h"
 #include "iscsi_target_auth.h"
 
index 9059c1e0b26e559dc0c1f69b3b97625d7cb806e4..48384b675e624b9b04011fe03750c11170961ef1 100644 (file)
@@ -28,7 +28,7 @@
 #include <target/configfs_macros.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_erl0.h"
@@ -36,7 +36,7 @@
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 #include "iscsi_target_configfs.h"
 
 struct target_fabric_configfs *lio_target_fabric_configfs;
@@ -674,12 +674,9 @@ static ssize_t lio_target_nacl_show_info(
                rb += sprintf(page+rb, "InitiatorAlias: %s\n",
                        sess->sess_ops->InitiatorAlias);
 
-               rb += sprintf(page+rb, "LIO Session ID: %u   "
-                       "ISID: 0x%02x %02x %02x %02x %02x %02x  "
-                       "TSIH: %hu  ", sess->sid,
-                       sess->isid[0], sess->isid[1], sess->isid[2],
-                       sess->isid[3], sess->isid[4], sess->isid[5],
-                       sess->tsih);
+               rb += sprintf(page+rb,
+                             "LIO Session ID: %u   ISID: 0x%6ph  TSIH: %hu  ",
+                             sess->sid, sess->isid, sess->tsih);
                rb += sprintf(page+rb, "SessionType: %s\n",
                                (sess->sess_ops->SessionType) ?
                                "Discovery" : "Normal");
@@ -1758,9 +1755,7 @@ static u32 lio_sess_get_initiator_sid(
        /*
         * iSCSI Initiator Session Identifier from RFC-3720.
         */
-       return snprintf(buf, size, "%02x%02x%02x%02x%02x%02x",
-               sess->isid[0], sess->isid[1], sess->isid[2],
-               sess->isid[3], sess->isid[4], sess->isid[5]);
+       return snprintf(buf, size, "%6phN", sess->isid);
 }
 
 static int lio_queue_data_in(struct se_cmd *se_cmd)
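
Both configfs hunks above replace six hand-written %02x conversions with the kernel's %ph extension: %*ph prints a small buffer as space-separated hex bytes and %*phN prints it packed without separators, with the field width giving the byte count. A tiny illustration (names are placeholders):

#include <linux/printk.h>
#include <linux/types.h>

static void show_isid_sketch(const u8 isid[6])
{
        pr_info("ISID: 0x%6ph\n", isid);        /* e.g. "0x00 02 3d 01 02 03" */
        pr_info("ISID: %6phN\n", isid);         /* e.g. "00023d010203" */
}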
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
deleted file mode 100644 (file)
index cbcff38..0000000
+++ /dev/null
@@ -1,883 +0,0 @@
-#ifndef ISCSI_TARGET_CORE_H
-#define ISCSI_TARGET_CORE_H
-
-#include <linux/in.h>
-#include <linux/configfs.h>
-#include <net/sock.h>
-#include <net/tcp.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/iscsi_proto.h>
-#include <target/target_core_base.h>
-
-#define ISCSIT_VERSION                 "v4.1.0"
-#define ISCSI_MAX_DATASN_MISSING_COUNT 16
-#define ISCSI_TX_THREAD_TCP_TIMEOUT    2
-#define ISCSI_RX_THREAD_TCP_TIMEOUT    2
-#define SECONDS_FOR_ASYNC_LOGOUT       10
-#define SECONDS_FOR_ASYNC_TEXT         10
-#define SECONDS_FOR_LOGOUT_COMP                15
-#define WHITE_SPACE                    " \t\v\f\n\r"
-#define ISCSIT_MIN_TAGS                        16
-#define ISCSIT_EXTRA_TAGS              8
-#define ISCSIT_TCP_BACKLOG             256
-
-/* struct iscsi_node_attrib sanity values */
-#define NA_DATAOUT_TIMEOUT             3
-#define NA_DATAOUT_TIMEOUT_MAX         60
-#define NA_DATAOUT_TIMEOUT_MIX         2
-#define NA_DATAOUT_TIMEOUT_RETRIES     5
-#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
-#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
-#define NA_NOPIN_TIMEOUT               15
-#define NA_NOPIN_TIMEOUT_MAX           60
-#define NA_NOPIN_TIMEOUT_MIN           3
-#define NA_NOPIN_RESPONSE_TIMEOUT      30
-#define NA_NOPIN_RESPONSE_TIMEOUT_MAX  60
-#define NA_NOPIN_RESPONSE_TIMEOUT_MIN  3
-#define NA_RANDOM_DATAIN_PDU_OFFSETS   0
-#define NA_RANDOM_DATAIN_SEQ_OFFSETS   0
-#define NA_RANDOM_R2T_OFFSETS          0
-
-/* struct iscsi_tpg_attrib sanity values */
-#define TA_AUTHENTICATION              1
-#define TA_LOGIN_TIMEOUT               15
-#define TA_LOGIN_TIMEOUT_MAX           30
-#define TA_LOGIN_TIMEOUT_MIN           5
-#define TA_NETIF_TIMEOUT               2
-#define TA_NETIF_TIMEOUT_MAX           15
-#define TA_NETIF_TIMEOUT_MIN           2
-#define TA_GENERATE_NODE_ACLS          0
-#define TA_DEFAULT_CMDSN_DEPTH         64
-#define TA_DEFAULT_CMDSN_DEPTH_MAX     512
-#define TA_DEFAULT_CMDSN_DEPTH_MIN     1
-#define TA_CACHE_DYNAMIC_ACLS          0
-/* Enabled by default in demo mode (generic_node_acls=1) */
-#define TA_DEMO_MODE_WRITE_PROTECT     1
-/* Disabled by default in production mode w/ explict ACLs */
-#define TA_PROD_MODE_WRITE_PROTECT     0
-#define TA_DEMO_MODE_DISCOVERY         1
-#define TA_DEFAULT_ERL                 0
-#define TA_CACHE_CORE_NPS              0
-/* T10 protection information disabled by default */
-#define TA_DEFAULT_T10_PI              0
-
-#define ISCSI_IOV_DATA_BUFFER          5
-
-enum iscsit_transport_type {
-       ISCSI_TCP                               = 0,
-       ISCSI_SCTP_TCP                          = 1,
-       ISCSI_SCTP_UDP                          = 2,
-       ISCSI_IWARP_TCP                         = 3,
-       ISCSI_IWARP_SCTP                        = 4,
-       ISCSI_INFINIBAND                        = 5,
-};
-
-/* RFC-3720 7.1.4  Standard Connection State Diagram for a Target */
-enum target_conn_state_table {
-       TARG_CONN_STATE_FREE                    = 0x1,
-       TARG_CONN_STATE_XPT_UP                  = 0x3,
-       TARG_CONN_STATE_IN_LOGIN                = 0x4,
-       TARG_CONN_STATE_LOGGED_IN               = 0x5,
-       TARG_CONN_STATE_IN_LOGOUT               = 0x6,
-       TARG_CONN_STATE_LOGOUT_REQUESTED        = 0x7,
-       TARG_CONN_STATE_CLEANUP_WAIT            = 0x8,
-};
-
-/* RFC-3720 7.3.2  Session State Diagram for a Target */
-enum target_sess_state_table {
-       TARG_SESS_STATE_FREE                    = 0x1,
-       TARG_SESS_STATE_ACTIVE                  = 0x2,
-       TARG_SESS_STATE_LOGGED_IN               = 0x3,
-       TARG_SESS_STATE_FAILED                  = 0x4,
-       TARG_SESS_STATE_IN_CONTINUE             = 0x5,
-};
-
-/* struct iscsi_data_count->type */
-enum data_count_type {
-       ISCSI_RX_DATA   = 1,
-       ISCSI_TX_DATA   = 2,
-};
-
-/* struct iscsi_datain_req->dr_complete */
-enum datain_req_comp_table {
-       DATAIN_COMPLETE_NORMAL                  = 1,
-       DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
-       DATAIN_COMPLETE_CONNECTION_RECOVERY     = 3,
-};
-
-/* struct iscsi_datain_req->recovery */
-enum datain_req_rec_table {
-       DATAIN_WITHIN_COMMAND_RECOVERY          = 1,
-       DATAIN_CONNECTION_RECOVERY              = 2,
-};
-
-/* struct iscsi_portal_group->state */
-enum tpg_state_table {
-       TPG_STATE_FREE                          = 0,
-       TPG_STATE_ACTIVE                        = 1,
-       TPG_STATE_INACTIVE                      = 2,
-       TPG_STATE_COLD_RESET                    = 3,
-};
-
-/* struct iscsi_tiqn->tiqn_state */
-enum tiqn_state_table {
-       TIQN_STATE_ACTIVE                       = 1,
-       TIQN_STATE_SHUTDOWN                     = 2,
-};
-
-/* struct iscsi_cmd->cmd_flags */
-enum cmd_flags_table {
-       ICF_GOT_LAST_DATAOUT                    = 0x00000001,
-       ICF_GOT_DATACK_SNACK                    = 0x00000002,
-       ICF_NON_IMMEDIATE_UNSOLICITED_DATA      = 0x00000004,
-       ICF_SENT_LAST_R2T                       = 0x00000008,
-       ICF_WITHIN_COMMAND_RECOVERY             = 0x00000010,
-       ICF_CONTIG_MEMORY                       = 0x00000020,
-       ICF_ATTACHED_TO_RQUEUE                  = 0x00000040,
-       ICF_OOO_CMDSN                           = 0x00000080,
-       ICF_SENDTARGETS_ALL                     = 0x00000100,
-       ICF_SENDTARGETS_SINGLE                  = 0x00000200,
-};
-
-/* struct iscsi_cmd->i_state */
-enum cmd_i_state_table {
-       ISTATE_NO_STATE                 = 0,
-       ISTATE_NEW_CMD                  = 1,
-       ISTATE_DEFERRED_CMD             = 2,
-       ISTATE_UNSOLICITED_DATA         = 3,
-       ISTATE_RECEIVE_DATAOUT          = 4,
-       ISTATE_RECEIVE_DATAOUT_RECOVERY = 5,
-       ISTATE_RECEIVED_LAST_DATAOUT    = 6,
-       ISTATE_WITHIN_DATAOUT_RECOVERY  = 7,
-       ISTATE_IN_CONNECTION_RECOVERY   = 8,
-       ISTATE_RECEIVED_TASKMGT         = 9,
-       ISTATE_SEND_ASYNCMSG            = 10,
-       ISTATE_SENT_ASYNCMSG            = 11,
-       ISTATE_SEND_DATAIN              = 12,
-       ISTATE_SEND_LAST_DATAIN         = 13,
-       ISTATE_SENT_LAST_DATAIN         = 14,
-       ISTATE_SEND_LOGOUTRSP           = 15,
-       ISTATE_SENT_LOGOUTRSP           = 16,
-       ISTATE_SEND_NOPIN               = 17,
-       ISTATE_SENT_NOPIN               = 18,
-       ISTATE_SEND_REJECT              = 19,
-       ISTATE_SENT_REJECT              = 20,
-       ISTATE_SEND_R2T                 = 21,
-       ISTATE_SENT_R2T                 = 22,
-       ISTATE_SEND_R2T_RECOVERY        = 23,
-       ISTATE_SENT_R2T_RECOVERY        = 24,
-       ISTATE_SEND_LAST_R2T            = 25,
-       ISTATE_SENT_LAST_R2T            = 26,
-       ISTATE_SEND_LAST_R2T_RECOVERY   = 27,
-       ISTATE_SENT_LAST_R2T_RECOVERY   = 28,
-       ISTATE_SEND_STATUS              = 29,
-       ISTATE_SEND_STATUS_BROKEN_PC    = 30,
-       ISTATE_SENT_STATUS              = 31,
-       ISTATE_SEND_STATUS_RECOVERY     = 32,
-       ISTATE_SENT_STATUS_RECOVERY     = 33,
-       ISTATE_SEND_TASKMGTRSP          = 34,
-       ISTATE_SENT_TASKMGTRSP          = 35,
-       ISTATE_SEND_TEXTRSP             = 36,
-       ISTATE_SENT_TEXTRSP             = 37,
-       ISTATE_SEND_NOPIN_WANT_RESPONSE = 38,
-       ISTATE_SENT_NOPIN_WANT_RESPONSE = 39,
-       ISTATE_SEND_NOPIN_NO_RESPONSE   = 40,
-       ISTATE_REMOVE                   = 41,
-       ISTATE_FREE                     = 42,
-};
-
-/* Used for iscsi_recover_cmdsn() return values */
-enum recover_cmdsn_ret_table {
-       CMDSN_ERROR_CANNOT_RECOVER      = -1,
-       CMDSN_NORMAL_OPERATION          = 0,
-       CMDSN_LOWER_THAN_EXP            = 1,
-       CMDSN_HIGHER_THAN_EXP           = 2,
-       CMDSN_MAXCMDSN_OVERRUN          = 3,
-};
-
-/* Used for iscsi_handle_immediate_data() return values */
-enum immedate_data_ret_table {
-       IMMEDIATE_DATA_CANNOT_RECOVER   = -1,
-       IMMEDIATE_DATA_NORMAL_OPERATION = 0,
-       IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
-};
-
-/* Used for iscsi_decide_dataout_action() return values */
-enum dataout_action_ret_table {
-       DATAOUT_CANNOT_RECOVER          = -1,
-       DATAOUT_NORMAL                  = 0,
-       DATAOUT_SEND_R2T                = 1,
-       DATAOUT_SEND_TO_TRANSPORT       = 2,
-       DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
-};
-
-/* Used for struct iscsi_node_auth->naf_flags */
-enum naf_flags_table {
-       NAF_USERID_SET                  = 0x01,
-       NAF_PASSWORD_SET                = 0x02,
-       NAF_USERID_IN_SET               = 0x04,
-       NAF_PASSWORD_IN_SET             = 0x08,
-};
-
-/* Used by various struct timer_list to manage iSCSI specific state */
-enum iscsi_timer_flags_table {
-       ISCSI_TF_RUNNING                = 0x01,
-       ISCSI_TF_STOP                   = 0x02,
-       ISCSI_TF_EXPIRED                = 0x04,
-};
-
-/* Used for struct iscsi_np->np_flags */
-enum np_flags_table {
-       NPF_IP_NETWORK          = 0x00,
-};
-
-/* Used for struct iscsi_np->np_thread_state */
-enum np_thread_state_table {
-       ISCSI_NP_THREAD_ACTIVE          = 1,
-       ISCSI_NP_THREAD_INACTIVE        = 2,
-       ISCSI_NP_THREAD_RESET           = 3,
-       ISCSI_NP_THREAD_SHUTDOWN        = 4,
-       ISCSI_NP_THREAD_EXIT            = 5,
-};
-
-struct iscsi_conn_ops {
-       u8      HeaderDigest;                   /* [0,1] == [None,CRC32C] */
-       u8      DataDigest;                     /* [0,1] == [None,CRC32C] */
-       u32     MaxRecvDataSegmentLength;       /* [512..2**24-1] */
-       u32     MaxXmitDataSegmentLength;       /* [512..2**24-1] */
-       u8      OFMarker;                       /* [0,1] == [No,Yes] */
-       u8      IFMarker;                       /* [0,1] == [No,Yes] */
-       u32     OFMarkInt;                      /* [1..65535] */
-       u32     IFMarkInt;                      /* [1..65535] */
-       /*
-        * iSER specific connection parameters
-        */
-       u32     InitiatorRecvDataSegmentLength; /* [512..2**24-1] */
-       u32     TargetRecvDataSegmentLength;    /* [512..2**24-1] */
-};
-
-struct iscsi_sess_ops {
-       char    InitiatorName[224];
-       char    InitiatorAlias[256];
-       char    TargetName[224];
-       char    TargetAlias[256];
-       char    TargetAddress[256];
-       u16     TargetPortalGroupTag;           /* [0..65535] */
-       u16     MaxConnections;                 /* [1..65535] */
-       u8      InitialR2T;                     /* [0,1] == [No,Yes] */
-       u8      ImmediateData;                  /* [0,1] == [No,Yes] */
-       u32     MaxBurstLength;                 /* [512..2**24-1] */
-       u32     FirstBurstLength;               /* [512..2**24-1] */
-       u16     DefaultTime2Wait;               /* [0..3600] */
-       u16     DefaultTime2Retain;             /* [0..3600] */
-       u16     MaxOutstandingR2T;              /* [1..65535] */
-       u8      DataPDUInOrder;                 /* [0,1] == [No,Yes] */
-       u8      DataSequenceInOrder;            /* [0,1] == [No,Yes] */
-       u8      ErrorRecoveryLevel;             /* [0..2] */
-       u8      SessionType;                    /* [0,1] == [Normal,Discovery]*/
-       /*
-        * iSER specific session parameters
-        */
-       u8      RDMAExtensions;                 /* [0,1] == [No,Yes] */
-};
-
-struct iscsi_queue_req {
-       int                     state;
-       struct iscsi_cmd        *cmd;
-       struct list_head        qr_list;
-};
-
-struct iscsi_data_count {
-       int                     data_length;
-       int                     sync_and_steering;
-       enum data_count_type    type;
-       u32                     iov_count;
-       u32                     ss_iov_count;
-       u32                     ss_marker_count;
-       struct kvec             *iov;
-};
-
-struct iscsi_param_list {
-       bool                    iser;
-       struct list_head        param_list;
-       struct list_head        extra_response_list;
-};
-
-struct iscsi_datain_req {
-       enum datain_req_comp_table dr_complete;
-       int                     generate_recovery_values;
-       enum datain_req_rec_table recovery;
-       u32                     begrun;
-       u32                     runlength;
-       u32                     data_length;
-       u32                     data_offset;
-       u32                     data_sn;
-       u32                     next_burst_len;
-       u32                     read_data_done;
-       u32                     seq_send_order;
-       struct list_head        cmd_datain_node;
-} ____cacheline_aligned;
-
-struct iscsi_ooo_cmdsn {
-       u16                     cid;
-       u32                     batch_count;
-       u32                     cmdsn;
-       u32                     exp_cmdsn;
-       struct iscsi_cmd        *cmd;
-       struct list_head        ooo_list;
-} ____cacheline_aligned;
-
-struct iscsi_datain {
-       u8                      flags;
-       u32                     data_sn;
-       u32                     length;
-       u32                     offset;
-} ____cacheline_aligned;
-
-struct iscsi_r2t {
-       int                     seq_complete;
-       int                     recovery_r2t;
-       int                     sent_r2t;
-       u32                     r2t_sn;
-       u32                     offset;
-       u32                     targ_xfer_tag;
-       u32                     xfer_len;
-       struct list_head        r2t_list;
-} ____cacheline_aligned;
-
-struct iscsi_cmd {
-       enum iscsi_timer_flags_table dataout_timer_flags;
-       /* DataOUT timeout retries */
-       u8                      dataout_timeout_retries;
-       /* Within command recovery count */
-       u8                      error_recovery_count;
-       /* iSCSI dependent state for out or order CmdSNs */
-       enum cmd_i_state_table  deferred_i_state;
-       /* iSCSI dependent state */
-       enum cmd_i_state_table  i_state;
-       /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
-       u8                      immediate_cmd;
-       /* Immediate data present */
-       u8                      immediate_data;
-       /* iSCSI Opcode */
-       u8                      iscsi_opcode;
-       /* iSCSI Response Code */
-       u8                      iscsi_response;
-       /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
-       u8                      logout_reason;
-       /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
-       u8                      logout_response;
-       /* MaxCmdSN has been incremented */
-       u8                      maxcmdsn_inc;
-       /* Immediate Unsolicited Dataout */
-       u8                      unsolicited_data;
-       /* Reject reason code */
-       u8                      reject_reason;
-       /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
-       u16                     logout_cid;
-       /* Command flags */
-       enum cmd_flags_table    cmd_flags;
-       /* Initiator Task Tag assigned from Initiator */
-       itt_t                   init_task_tag;
-       /* Target Transfer Tag assigned from Target */
-       u32                     targ_xfer_tag;
-       /* CmdSN assigned from Initiator */
-       u32                     cmd_sn;
-       /* ExpStatSN assigned from Initiator */
-       u32                     exp_stat_sn;
-       /* StatSN assigned to this ITT */
-       u32                     stat_sn;
-       /* DataSN Counter */
-       u32                     data_sn;
-       /* R2TSN Counter */
-       u32                     r2t_sn;
-       /* Last DataSN acknowledged via DataAck SNACK */
-       u32                     acked_data_sn;
-       /* Used for echoing NOPOUT ping data */
-       u32                     buf_ptr_size;
-       /* Used to store DataDigest */
-       u32                     data_crc;
-       /* Counter for MaxOutstandingR2T */
-       u32                     outstanding_r2ts;
-       /* Next R2T Offset when DataSequenceInOrder=Yes */
-       u32                     r2t_offset;
-       /* Iovec current and orig count for iscsi_cmd->iov_data */
-       u32                     iov_data_count;
-       u32                     orig_iov_data_count;
-       /* Number of miscellaneous iovecs used for IP stack calls */
-       u32                     iov_misc_count;
-       /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
-       u32                     pdu_count;
-       /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
-       u32                     pdu_send_order;
-       /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
-       u32                     pdu_start;
-       /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
-       u32                     seq_send_order;
-       /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
-       u32                     seq_count;
-       /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
-       u32                     seq_no;
-       /* Lowest offset in current DataOUT sequence */
-       u32                     seq_start_offset;
-       /* Highest offset in current DataOUT sequence */
-       u32                     seq_end_offset;
-       /* Total size in bytes received so far of READ data */
-       u32                     read_data_done;
-       /* Total size in bytes received so far of WRITE data */
-       u32                     write_data_done;
-       /* Counter for FirstBurstLength key */
-       u32                     first_burst_len;
-       /* Counter for MaxBurstLength key */
-       u32                     next_burst_len;
-       /* Transfer size used for IP stack calls */
-       u32                     tx_size;
-       /* Buffer used for various purposes */
-       void                    *buf_ptr;
-       /* Used by SendTargets=[iqn.,eui.] discovery */
-       void                    *text_in_ptr;
-       /* See include/linux/dma-mapping.h */
-       enum dma_data_direction data_direction;
-       /* iSCSI PDU Header + CRC */
-       unsigned char           pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
-       /* Number of times struct iscsi_cmd is present in immediate queue */
-       atomic_t                immed_queue_count;
-       atomic_t                response_queue_count;
-       spinlock_t              datain_lock;
-       spinlock_t              dataout_timeout_lock;
-       /* spinlock for protecting struct iscsi_cmd->i_state */
-       spinlock_t              istate_lock;
-       /* spinlock for adding within command recovery entries */
-       spinlock_t              error_lock;
-       /* spinlock for adding R2Ts */
-       spinlock_t              r2t_lock;
-       /* DataIN List */
-       struct list_head        datain_list;
-       /* R2T List */
-       struct list_head        cmd_r2t_list;
-       /* Timer for DataOUT */
-       struct timer_list       dataout_timer;
-       /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
-       struct kvec             *iov_data;
-       /* Iovecs for miscellaneous purposes */
-#define ISCSI_MISC_IOVECS                      5
-       struct kvec             iov_misc[ISCSI_MISC_IOVECS];
-       /* Array of struct iscsi_pdu used for DataPDUInOrder=No */
-       struct iscsi_pdu        *pdu_list;
-       /* Current struct iscsi_pdu used for DataPDUInOrder=No */
-       struct iscsi_pdu        *pdu_ptr;
-       /* Array of struct iscsi_seq used for DataSequenceInOrder=No */
-       struct iscsi_seq        *seq_list;
-       /* Current struct iscsi_seq used for DataSequenceInOrder=No */
-       struct iscsi_seq        *seq_ptr;
-       /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
-       struct iscsi_tmr_req    *tmr_req;
-       /* Connection this command is alligient to */
-       struct iscsi_conn       *conn;
-       /* Pointer to connection recovery entry */
-       struct iscsi_conn_recovery *cr;
-       /* Session the command is part of,  used for connection recovery */
-       struct iscsi_session    *sess;
-       /* list_head for connection list */
-       struct list_head        i_conn_node;
-       /* The TCM I/O descriptor that is accessed via container_of() */
-       struct se_cmd           se_cmd;
-       /* Sense buffer that will be mapped into outgoing status */
-#define ISCSI_SENSE_BUFFER_LEN          (TRANSPORT_SENSE_BUFFER + 2)
-       unsigned char           sense_buffer[ISCSI_SENSE_BUFFER_LEN];
-
-       u32                     padding;
-       u8                      pad_bytes[4];
-
-       struct scatterlist      *first_data_sg;
-       u32                     first_data_sg_off;
-       u32                     kmapped_nents;
-       sense_reason_t          sense_reason;
-}  ____cacheline_aligned;
-
-struct iscsi_tmr_req {
-       bool                    task_reassign:1;
-       u32                     exp_data_sn;
-       struct iscsi_cmd        *ref_cmd;
-       struct iscsi_conn_recovery *conn_recovery;
-       struct se_tmr_req       *se_tmr_req;
-};
-
-struct iscsi_conn {
-       wait_queue_head_t       queues_wq;
-       /* Authentication Successful for this connection */
-       u8                      auth_complete;
-       /* State connection is currently in */
-       u8                      conn_state;
-       u8                      conn_logout_reason;
-       u8                      network_transport;
-       enum iscsi_timer_flags_table nopin_timer_flags;
-       enum iscsi_timer_flags_table nopin_response_timer_flags;
-       /* Used to know what thread encountered a transport failure */
-       u8                      which_thread;
-       /* connection id assigned by the Initiator */
-       u16                     cid;
-       /* Remote TCP Port */
-       u16                     login_port;
-       u16                     local_port;
-       int                     net_size;
-       int                     login_family;
-       u32                     auth_id;
-       u32                     conn_flags;
-       /* Used for iscsi_tx_login_rsp() */
-       itt_t                   login_itt;
-       u32                     exp_statsn;
-       /* Per connection status sequence number */
-       u32                     stat_sn;
-       /* IFMarkInt's Current Value */
-       u32                     if_marker;
-       /* OFMarkInt's Current Value */
-       u32                     of_marker;
-       /* Used for calculating OFMarker offset to next PDU */
-       u32                     of_marker_offset;
-#define IPV6_ADDRESS_SPACE                             48
-       unsigned char           login_ip[IPV6_ADDRESS_SPACE];
-       unsigned char           local_ip[IPV6_ADDRESS_SPACE];
-       int                     conn_usage_count;
-       int                     conn_waiting_on_uc;
-       atomic_t                check_immediate_queue;
-       atomic_t                conn_logout_remove;
-       atomic_t                connection_exit;
-       atomic_t                connection_recovery;
-       atomic_t                connection_reinstatement;
-       atomic_t                connection_wait_rcfr;
-       atomic_t                sleep_on_conn_wait_comp;
-       atomic_t                transport_failed;
-       struct completion       conn_post_wait_comp;
-       struct completion       conn_wait_comp;
-       struct completion       conn_wait_rcfr_comp;
-       struct completion       conn_waiting_on_uc_comp;
-       struct completion       conn_logout_comp;
-       struct completion       tx_half_close_comp;
-       struct completion       rx_half_close_comp;
-       /* socket used by this connection */
-       struct socket           *sock;
-       void                    (*orig_data_ready)(struct sock *);
-       void                    (*orig_state_change)(struct sock *);
-#define LOGIN_FLAGS_READ_ACTIVE                1
-#define LOGIN_FLAGS_CLOSED             2
-#define LOGIN_FLAGS_READY              4
-       unsigned long           login_flags;
-       struct delayed_work     login_work;
-       struct delayed_work     login_cleanup_work;
-       struct iscsi_login      *login;
-       struct timer_list       nopin_timer;
-       struct timer_list       nopin_response_timer;
-       struct timer_list       transport_timer;
-       struct task_struct      *login_kworker;
-       /* Spinlock used for add/deleting cmd's from conn_cmd_list */
-       spinlock_t              cmd_lock;
-       spinlock_t              conn_usage_lock;
-       spinlock_t              immed_queue_lock;
-       spinlock_t              nopin_timer_lock;
-       spinlock_t              response_queue_lock;
-       spinlock_t              state_lock;
-       /* libcrypto RX and TX contexts for crc32c */
-       struct hash_desc        conn_rx_hash;
-       struct hash_desc        conn_tx_hash;
-       /* Used for scheduling TX and RX connection kthreads */
-       cpumask_var_t           conn_cpumask;
-       unsigned int            conn_rx_reset_cpumask:1;
-       unsigned int            conn_tx_reset_cpumask:1;
-       /* list_head of struct iscsi_cmd for this connection */
-       struct list_head        conn_cmd_list;
-       struct list_head        immed_queue_list;
-       struct list_head        response_queue_list;
-       struct iscsi_conn_ops   *conn_ops;
-       struct iscsi_login      *conn_login;
-       struct iscsit_transport *conn_transport;
-       struct iscsi_param_list *param_list;
-       /* Used for per connection auth state machine */
-       void                    *auth_protocol;
-       void                    *context;
-       struct iscsi_login_thread_s *login_thread;
-       struct iscsi_portal_group *tpg;
-       struct iscsi_tpg_np     *tpg_np;
-       /* Pointer to parent session */
-       struct iscsi_session    *sess;
-       /* Pointer to thread_set in use for this conn's threads */
-       struct iscsi_thread_set *thread_set;
-       /* list_head for session connection list */
-       struct list_head        conn_list;
-} ____cacheline_aligned;
-
-struct iscsi_conn_recovery {
-       u16                     cid;
-       u32                     cmd_count;
-       u32                     maxrecvdatasegmentlength;
-       u32                     maxxmitdatasegmentlength;
-       int                     ready_for_reallegiance;
-       struct list_head        conn_recovery_cmd_list;
-       spinlock_t              conn_recovery_cmd_lock;
-       struct timer_list       time2retain_timer;
-       struct iscsi_session    *sess;
-       struct list_head        cr_list;
-}  ____cacheline_aligned;
-
-struct iscsi_session {
-       u8                      initiator_vendor;
-       u8                      isid[6];
-       enum iscsi_timer_flags_table time2retain_timer_flags;
-       u8                      version_active;
-       u16                     cid_called;
-       u16                     conn_recovery_count;
-       u16                     tsih;
-       /* state session is currently in */
-       u32                     session_state;
-       /* session wide counter: initiator assigned task tag */
-       itt_t                   init_task_tag;
-       /* session wide counter: target assigned task tag */
-       u32                     targ_xfer_tag;
-       u32                     cmdsn_window;
-
-       /* protects cmdsn values */
-       struct mutex            cmdsn_mutex;
-       /* session wide counter: expected command sequence number */
-       u32                     exp_cmd_sn;
-       /* session wide counter: maximum allowed command sequence number */
-       u32                     max_cmd_sn;
-       struct list_head        sess_ooo_cmdsn_list;
-
-       /* LIO specific session ID */
-       u32                     sid;
-       char                    auth_type[8];
-       /* unique within the target */
-       int                     session_index;
-       /* Used for session reference counting */
-       int                     session_usage_count;
-       int                     session_waiting_on_uc;
-       atomic_long_t           cmd_pdus;
-       atomic_long_t           rsp_pdus;
-       atomic_long_t           tx_data_octets;
-       atomic_long_t           rx_data_octets;
-       atomic_long_t           conn_digest_errors;
-       atomic_long_t           conn_timeout_errors;
-       u64                     creation_time;
-       /* Number of active connections */
-       atomic_t                nconn;
-       atomic_t                session_continuation;
-       atomic_t                session_fall_back_to_erl0;
-       atomic_t                session_logout;
-       atomic_t                session_reinstatement;
-       atomic_t                session_stop_active;
-       atomic_t                sleep_on_sess_wait_comp;
-       /* connection list */
-       struct list_head        sess_conn_list;
-       struct list_head        cr_active_list;
-       struct list_head        cr_inactive_list;
-       spinlock_t              conn_lock;
-       spinlock_t              cr_a_lock;
-       spinlock_t              cr_i_lock;
-       spinlock_t              session_usage_lock;
-       spinlock_t              ttt_lock;
-       struct completion       async_msg_comp;
-       struct completion       reinstatement_comp;
-       struct completion       session_wait_comp;
-       struct completion       session_waiting_on_uc_comp;
-       struct timer_list       time2retain_timer;
-       struct iscsi_sess_ops   *sess_ops;
-       struct se_session       *se_sess;
-       struct iscsi_portal_group *tpg;
-} ____cacheline_aligned;
-
-struct iscsi_login {
-       u8 auth_complete;
-       u8 checked_for_existing;
-       u8 current_stage;
-       u8 leading_connection;
-       u8 first_request;
-       u8 version_min;
-       u8 version_max;
-       u8 login_complete;
-       u8 login_failed;
-       bool zero_tsih;
-       char isid[6];
-       u32 cmd_sn;
-       itt_t init_task_tag;
-       u32 initial_exp_statsn;
-       u32 rsp_length;
-       u16 cid;
-       u16 tsih;
-       char req[ISCSI_HDR_LEN];
-       char rsp[ISCSI_HDR_LEN];
-       char *req_buf;
-       char *rsp_buf;
-       struct iscsi_conn *conn;
-       struct iscsi_np *np;
-} ____cacheline_aligned;
-
-struct iscsi_node_attrib {
-       u32                     dataout_timeout;
-       u32                     dataout_timeout_retries;
-       u32                     default_erl;
-       u32                     nopin_timeout;
-       u32                     nopin_response_timeout;
-       u32                     random_datain_pdu_offsets;
-       u32                     random_datain_seq_offsets;
-       u32                     random_r2t_offsets;
-       u32                     tmr_cold_reset;
-       u32                     tmr_warm_reset;
-       struct iscsi_node_acl *nacl;
-};
-
-struct se_dev_entry_s;
-
-struct iscsi_node_auth {
-       enum naf_flags_table    naf_flags;
-       int                     authenticate_target;
-       /* Used for iscsit_global->discovery_auth,
-        * set to zero (auth disabled) by default */
-       int                     enforce_discovery_auth;
-#define MAX_USER_LEN                           256
-#define MAX_PASS_LEN                           256
-       char                    userid[MAX_USER_LEN];
-       char                    password[MAX_PASS_LEN];
-       char                    userid_mutual[MAX_USER_LEN];
-       char                    password_mutual[MAX_PASS_LEN];
-};
-
-#include "iscsi_target_stat.h"
-
-struct iscsi_node_stat_grps {
-       struct config_group     iscsi_sess_stats_group;
-       struct config_group     iscsi_conn_stats_group;
-};
-
-struct iscsi_node_acl {
-       struct iscsi_node_attrib node_attrib;
-       struct iscsi_node_auth  node_auth;
-       struct iscsi_node_stat_grps node_stat_grps;
-       struct se_node_acl      se_node_acl;
-};
-
-struct iscsi_tpg_attrib {
-       u32                     authentication;
-       u32                     login_timeout;
-       u32                     netif_timeout;
-       u32                     generate_node_acls;
-       u32                     cache_dynamic_acls;
-       u32                     default_cmdsn_depth;
-       u32                     demo_mode_write_protect;
-       u32                     prod_mode_write_protect;
-       u32                     demo_mode_discovery;
-       u32                     default_erl;
-       u8                      t10_pi;
-       struct iscsi_portal_group *tpg;
-};
-
-struct iscsi_np {
-       int                     np_network_transport;
-       int                     np_ip_proto;
-       int                     np_sock_type;
-       enum np_thread_state_table np_thread_state;
-       bool                    enabled;
-       enum iscsi_timer_flags_table np_login_timer_flags;
-       u32                     np_exports;
-       enum np_flags_table     np_flags;
-       unsigned char           np_ip[IPV6_ADDRESS_SPACE];
-       u16                     np_port;
-       spinlock_t              np_thread_lock;
-       struct completion       np_restart_comp;
-       struct socket           *np_socket;
-       struct __kernel_sockaddr_storage np_sockaddr;
-       struct task_struct      *np_thread;
-       struct timer_list       np_login_timer;
-       void                    *np_context;
-       struct iscsit_transport *np_transport;
-       struct list_head        np_list;
-} ____cacheline_aligned;
-
-struct iscsi_tpg_np {
-       struct iscsi_np         *tpg_np;
-       struct iscsi_portal_group *tpg;
-       struct iscsi_tpg_np     *tpg_np_parent;
-       struct list_head        tpg_np_list;
-       struct list_head        tpg_np_child_list;
-       struct list_head        tpg_np_parent_list;
-       struct se_tpg_np        se_tpg_np;
-       spinlock_t              tpg_np_parent_lock;
-       struct completion       tpg_np_comp;
-       struct kref             tpg_np_kref;
-};
-
-struct iscsi_portal_group {
-       unsigned char           tpg_chap_id;
-       /* TPG State */
-       enum tpg_state_table    tpg_state;
-       /* Target Portal Group Tag */
-       u16                     tpgt;
-       /* Id assigned to target sessions */
-       u16                     ntsih;
-       /* Number of active sessions */
-       u32                     nsessions;
-       /* Number of Network Portals available for this TPG */
-       u32                     num_tpg_nps;
-       /* Per TPG LIO specific session ID. */
-       u32                     sid;
-       /* Spinlock for adding/removing Network Portals */
-       spinlock_t              tpg_np_lock;
-       spinlock_t              tpg_state_lock;
-       struct se_portal_group tpg_se_tpg;
-       struct mutex            tpg_access_lock;
-       struct semaphore        np_login_sem;
-       struct iscsi_tpg_attrib tpg_attrib;
-       struct iscsi_node_auth  tpg_demo_auth;
-       /* Pointer to default list of iSCSI parameters for TPG */
-       struct iscsi_param_list *param_list;
-       struct iscsi_tiqn       *tpg_tiqn;
-       struct list_head        tpg_gnp_list;
-       struct list_head        tpg_list;
-} ____cacheline_aligned;
-
-struct iscsi_wwn_stat_grps {
-       struct config_group     iscsi_stat_group;
-       struct config_group     iscsi_instance_group;
-       struct config_group     iscsi_sess_err_group;
-       struct config_group     iscsi_tgt_attr_group;
-       struct config_group     iscsi_login_stats_group;
-       struct config_group     iscsi_logout_stats_group;
-};
-
-struct iscsi_tiqn {
-#define ISCSI_IQN_LEN                          224
-       unsigned char           tiqn[ISCSI_IQN_LEN];
-       enum tiqn_state_table   tiqn_state;
-       int                     tiqn_access_count;
-       u32                     tiqn_active_tpgs;
-       u32                     tiqn_ntpgs;
-       u32                     tiqn_num_tpg_nps;
-       u32                     tiqn_nsessions;
-       struct list_head        tiqn_list;
-       struct list_head        tiqn_tpg_list;
-       spinlock_t              tiqn_state_lock;
-       spinlock_t              tiqn_tpg_lock;
-       struct se_wwn           tiqn_wwn;
-       struct iscsi_wwn_stat_grps tiqn_stat_grps;
-       int                     tiqn_index;
-       struct iscsi_sess_err_stats  sess_err_stats;
-       struct iscsi_login_stats     login_stats;
-       struct iscsi_logout_stats    logout_stats;
-} ____cacheline_aligned;
-
-struct iscsit_global {
-       /* In core shutdown */
-       u32                     in_shutdown;
-       u32                     active_ts;
-       /* Unique identifier used for the authentication daemon */
-       u32                     auth_id;
-       u32                     inactive_ts;
-       /* Thread Set bitmap count */
-       int                     ts_bitmap_count;
-       /* Thread Set bitmap pointer */
-       unsigned long           *ts_bitmap;
-       /* Used for iSCSI discovery session authentication */
-       struct iscsi_node_acl   discovery_acl;
-       struct iscsi_portal_group       *discovery_tpg;
-};
-
-#endif /* ISCSI_TARGET_CORE_H */
index e93d5a7a3f8168b9330cbd2ff6238f7a588c88f4..fb3b52b124ac3772597a90ccc46cacd1acea825b 100644 (file)
@@ -18,7 +18,7 @@
 
 #include <scsi/iscsi_proto.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_erl1.h"
 #include "iscsi_target_util.h"
index 7087c736daa520fd22306e6cc39000b7c9aa08c0..34c3cd1b05ce8a40d9911da3815bc4a44b8a479c 100644 (file)
@@ -21,7 +21,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
index a0ae5fc0ad75b5079fc7932f6dacbb19b2c68a64..1c197bad6132be98cd267b6ea666fc24e41e00a9 100644 (file)
@@ -21,7 +21,8 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_transport.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_tq.h"
 #include "iscsi_target_erl0.h"
@@ -939,7 +940,8 @@ void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
 
        if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
                spin_unlock_bh(&conn->state_lock);
-               iscsit_close_connection(conn);
+               if (conn->conn_transport->transport_type == ISCSI_TCP)
+                       iscsit_close_connection(conn);
                return;
        }
 
index cda4d80cfaef999e45e4ac8a6b894513a3a44ad6..2e561deb30a2b6cd53f186b0e388311da35ad439 100644 (file)
@@ -22,7 +22,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_device.h"
index 4ca8fd2a70db4c05597f6b4bfbac171ce58ea8c7..e24f1c7c5862d4af2f0ae53efb3e981f153080db 100644 (file)
@@ -21,7 +21,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target_erl0.h"
index 713c0c1877ab8d16bb999ab6738b058e11c82ba9..153fb66ac1b83693b2a7ea18580ddd4c94f9b5a9 100644 (file)
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
+#include <target/iscsi/iscsi_target_stat.h>
 #include "iscsi_target_tq.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_nego.h"
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_erl2.h"
 #include "iscsi_target_login.h"
-#include "iscsi_target_stat.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
 #include "iscsi_target.h"
index 62a095f36bf2f78b77d2a9b75cea99b9c2f14ecb..8c02fa34716fae5a40dbf8cb09357bba5df2e7bd 100644 (file)
@@ -22,7 +22,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_login.h"
 #include "iscsi_target_nego.h"
index 16454a922e2ba9ff2bc532cb0b896997b991a02e..208cca8a363c86de490aed4dcd1103fcd74b4ecc 100644 (file)
@@ -18,7 +18,7 @@
 
 #include <target/target_core_base.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
index 18c29260b4a213d959463b41083abe6555166985..d4f9e96456978eab75e280e123308db09f6e95d2 100644 (file)
@@ -18,7 +18,7 @@
 
 #include <linux/slab.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_parameters.h"
 
index ca41b583f2f6d048927b318d69fd56ae9907c015..e446a09c886b1a2ca1344d87f755806b237fe406 100644 (file)
@@ -20,7 +20,7 @@
 #include <linux/slab.h>
 #include <linux/random.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_util.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_seq_pdu_list.h"
index 10339551030767cd64428686bcb66e0a186c530d..5e1349a3b1438ece26d986f31608bd6912393371 100644 (file)
 #include <target/target_core_base.h>
 #include <target/configfs_macros.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_device.h"
 #include "iscsi_target_tpg.h"
 #include "iscsi_target_util.h"
-#include "iscsi_target_stat.h"
+#include <target/iscsi/iscsi_target_stat.h>
 
 #ifndef INITIAL_JIFFIES
 #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
diff --git a/drivers/target/iscsi/iscsi_target_stat.h b/drivers/target/iscsi/iscsi_target_stat.h
deleted file mode 100644 (file)
index 3ff76b4..0000000
+++ /dev/null
@@ -1,64 +0,0 @@
-#ifndef ISCSI_TARGET_STAT_H
-#define ISCSI_TARGET_STAT_H
-
-/*
- * For struct iscsi_tiqn->tiqn_wwn default groups
- */
-extern struct config_item_type iscsi_stat_instance_cit;
-extern struct config_item_type iscsi_stat_sess_err_cit;
-extern struct config_item_type iscsi_stat_tgt_attr_cit;
-extern struct config_item_type iscsi_stat_login_cit;
-extern struct config_item_type iscsi_stat_logout_cit;
-
-/*
- * For struct iscsi_session->se_sess default groups
- */
-extern struct config_item_type iscsi_stat_sess_cit;
-
-/* iSCSI session error types */
-#define ISCSI_SESS_ERR_UNKNOWN         0
-#define ISCSI_SESS_ERR_DIGEST          1
-#define ISCSI_SESS_ERR_CXN_TIMEOUT     2
-#define ISCSI_SESS_ERR_PDU_FORMAT      3
-
-/* iSCSI session error stats */
-struct iscsi_sess_err_stats {
-       spinlock_t      lock;
-       u32             digest_errors;
-       u32             cxn_timeout_errors;
-       u32             pdu_format_errors;
-       u32             last_sess_failure_type;
-       char            last_sess_fail_rem_name[224];
-} ____cacheline_aligned;
-
-/* iSCSI login failure types (sub oids) */
-#define ISCSI_LOGIN_FAIL_OTHER         2
-#define ISCSI_LOGIN_FAIL_REDIRECT      3
-#define ISCSI_LOGIN_FAIL_AUTHORIZE     4
-#define ISCSI_LOGIN_FAIL_AUTHENTICATE  5
-#define ISCSI_LOGIN_FAIL_NEGOTIATE     6
-
-/* iSCSI login stats */
-struct iscsi_login_stats {
-       spinlock_t      lock;
-       u32             accepts;
-       u32             other_fails;
-       u32             redirects;
-       u32             authorize_fails;
-       u32             authenticate_fails;
-       u32             negotiate_fails;        /* used for notifications */
-       u64             last_fail_time;         /* time stamp (jiffies) */
-       u32             last_fail_type;
-       int             last_intr_fail_ip_family;
-       unsigned char   last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
-       char            last_intr_fail_name[224];
-} ____cacheline_aligned;
-
-/* iSCSI logout stats */
-struct iscsi_logout_stats {
-       spinlock_t      lock;
-       u32             normal_logouts;
-       u32             abnormal_logouts;
-} ____cacheline_aligned;
-
-#endif   /*** ISCSI_TARGET_STAT_H ***/
index 78404b1cc0bf311eb80b738d68eacca99ea446d0..b0224a77e26d3aff2549b71dd4b8c6a816213557 100644 (file)
@@ -23,7 +23,7 @@
 #include <target/target_core_fabric.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
 #include "iscsi_target_device.h"
index 9053a3c0c6e51675faf45bfbbfd990d48752f67d..bdd127c0e3aed1c718f0bd8b7d656a2c3aefcb71 100644 (file)
@@ -20,7 +20,7 @@
 #include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_erl0.h"
 #include "iscsi_target_login.h"
 #include "iscsi_target_nodeattrib.h"
index 601e9cc61e98e754f9a4927f8be3f0ae6d2fc3c3..26aa509964737cbe570f0b87492bc30b8ec4111b 100644 (file)
 #include <linux/list.h>
 #include <linux/bitmap.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_tq.h"
 #include "iscsi_target.h"
 
-static LIST_HEAD(active_ts_list);
 static LIST_HEAD(inactive_ts_list);
-static DEFINE_SPINLOCK(active_ts_lock);
 static DEFINE_SPINLOCK(inactive_ts_lock);
 static DEFINE_SPINLOCK(ts_bitmap_lock);
 
-static void iscsi_add_ts_to_active_list(struct iscsi_thread_set *ts)
-{
-       spin_lock(&active_ts_lock);
-       list_add_tail(&ts->ts_list, &active_ts_list);
-       iscsit_global->active_ts++;
-       spin_unlock(&active_ts_lock);
-}
-
 static void iscsi_add_ts_to_inactive_list(struct iscsi_thread_set *ts)
 {
+       if (!list_empty(&ts->ts_list)) {
+               WARN_ON(1);
+               return;
+       }
        spin_lock(&inactive_ts_lock);
        list_add_tail(&ts->ts_list, &inactive_ts_list);
        iscsit_global->inactive_ts++;
        spin_unlock(&inactive_ts_lock);
 }
 
-static void iscsi_del_ts_from_active_list(struct iscsi_thread_set *ts)
-{
-       spin_lock(&active_ts_lock);
-       list_del(&ts->ts_list);
-       iscsit_global->active_ts--;
-       spin_unlock(&active_ts_lock);
-}
-
 static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
 {
        struct iscsi_thread_set *ts;
@@ -66,7 +52,7 @@ static struct iscsi_thread_set *iscsi_get_ts_from_inactive_list(void)
 
        ts = list_first_entry(&inactive_ts_list, struct iscsi_thread_set, ts_list);
 
-       list_del(&ts->ts_list);
+       list_del_init(&ts->ts_list);
        iscsit_global->inactive_ts--;
        spin_unlock(&inactive_ts_lock);
 
@@ -204,8 +190,6 @@ static void iscsi_deallocate_extra_thread_sets(void)
 
 void iscsi_activate_thread_set(struct iscsi_conn *conn, struct iscsi_thread_set *ts)
 {
-       iscsi_add_ts_to_active_list(ts);
-
        spin_lock_bh(&ts->ts_state_lock);
        conn->thread_set = ts;
        ts->conn = conn;
@@ -397,7 +381,6 @@ struct iscsi_conn *iscsi_rx_thread_pre_handler(struct iscsi_thread_set *ts)
 
        if (ts->delay_inactive && (--ts->thread_count == 0)) {
                spin_unlock_bh(&ts->ts_state_lock);
-               iscsi_del_ts_from_active_list(ts);
 
                if (!iscsit_global->in_shutdown)
                        iscsi_deallocate_extra_thread_sets();
@@ -452,7 +435,6 @@ struct iscsi_conn *iscsi_tx_thread_pre_handler(struct iscsi_thread_set *ts)
 
        if (ts->delay_inactive && (--ts->thread_count == 0)) {
                spin_unlock_bh(&ts->ts_state_lock);
-               iscsi_del_ts_from_active_list(ts);
 
                if (!iscsit_global->in_shutdown)
                        iscsi_deallocate_extra_thread_sets();
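The thread-set hunks above drop the separate active list entirely and instead rely on list_del_init() plus a list_empty() check to refuse re-queueing a thread set that is still linked somewhere. Below is a small standalone C sketch of that membership-check pattern, modelled on the kernel's circular list_head; the names are illustrative, not the kernel API.

```c
#include <stdio.h>

/* Minimal circular list node, modelled on the kernel's struct list_head. */
struct node {
    struct node *next, *prev;
};

static void node_init(struct node *n)           { n->next = n->prev = n; }
static int  node_unlinked(const struct node *n) { return n->next == n; }

static void list_add_tail_node(struct node *n, struct node *head)
{
    n->prev = head->prev;
    n->next = head;
    head->prev->next = n;
    head->prev = n;
}

/* list_del_init(): unlink and re-point the node at itself, so a later
 * membership check via node_unlinked() still works. */
static void list_del_init_node(struct node *n)
{
    n->prev->next = n->next;
    n->next->prev = n->prev;
    node_init(n);
}

int main(void)
{
    struct node inactive, ts;

    node_init(&inactive);
    node_init(&ts);

    list_add_tail_node(&ts, &inactive);
    printf("queued: %d\n", !node_unlinked(&ts));        /* 1 */

    list_del_init_node(&ts);
    /* The guard added in iscsi_add_ts_to_inactive_list(): only re-queue
     * a node that is not linked anywhere else. */
    printf("safe to re-add: %d\n", node_unlinked(&ts)); /* 1 */
    return 0;
}
```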
index bcd88ec99793ba554496369d8853ee391cef8c11..390df8ed72b26738c53073180063c42847850585 100644 (file)
@@ -25,7 +25,7 @@
 #include <target/target_core_configfs.h>
 #include <target/iscsi/iscsi_transport.h>
 
-#include "iscsi_target_core.h"
+#include <target/iscsi/iscsi_target_core.h>
 #include "iscsi_target_parameters.h"
 #include "iscsi_target_seq_pdu_list.h"
 #include "iscsi_target_datain_values.h"
@@ -390,6 +390,7 @@ struct iscsi_cmd *iscsit_find_cmd_from_itt(
                        init_task_tag, conn->cid);
        return NULL;
 }
+EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
 
 struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(
        struct iscsi_conn *conn,
@@ -939,13 +940,8 @@ static int iscsit_add_nopin(struct iscsi_conn *conn, int want_response)
        state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
                                ISTATE_SEND_NOPIN_NO_RESPONSE;
        cmd->init_task_tag = RESERVED_ITT;
-       spin_lock_bh(&conn->sess->ttt_lock);
-       cmd->targ_xfer_tag = (want_response) ? conn->sess->targ_xfer_tag++ :
-                       0xFFFFFFFF;
-       if (want_response && (cmd->targ_xfer_tag == 0xFFFFFFFF))
-               cmd->targ_xfer_tag = conn->sess->targ_xfer_tag++;
-       spin_unlock_bh(&conn->sess->ttt_lock);
-
+       cmd->targ_xfer_tag = (want_response) ?
+                            session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
        spin_lock_bh(&conn->cmd_lock);
        list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
        spin_unlock_bh(&conn->cmd_lock);
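The iscsit_add_nopin() hunk replaces the open-coded target transfer tag allocation with a call to session_get_next_ttt(). The helper's body is not shown in this diff; the sketch below is a userspace model of the rule the removed code implemented (hand out the next tag, skipping the reserved 0xFFFFFFFF value), with the caller still choosing 0xFFFFFFFF when no response is wanted. Locking is omitted; in the kernel the counter is protected by the session's ttt_lock.

```c
#include <stdint.h>
#include <stdio.h>

/* Userspace model of a per-session target transfer tag allocator. */
struct session {
    uint32_t targ_xfer_tag;     /* next tag to hand out */
};

static uint32_t session_get_next_ttt_model(struct session *sess)
{
    uint32_t ttt = sess->targ_xfer_tag++;

    /* 0xFFFFFFFF is the reserved "no response expected" value, so skip it. */
    if (ttt == 0xFFFFFFFFu)
        ttt = sess->targ_xfer_tag++;
    return ttt;
}

int main(void)
{
    struct session sess = { .targ_xfer_tag = 0xFFFFFFFEu };

    printf("0x%08x\n", session_get_next_ttt_model(&sess)); /* 0xfffffffe */
    printf("0x%08x\n", session_get_next_ttt_model(&sess)); /* skips 0xffffffff, returns 0x00000000 */
    return 0;
}
```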
index a68508c4fec862b325c6a5015b9493871f166e08..1ab754a671ff301977a90c90246ae5a223352051 100644 (file)
@@ -16,7 +16,6 @@ extern struct iscsi_r2t *iscsit_get_holder_for_r2tsn(struct iscsi_cmd *, u32);
 extern int iscsit_sequence_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                               unsigned char * ,__be32 cmdsn);
 extern int iscsit_check_unsolicited_dataout(struct iscsi_cmd *, unsigned char *);
-extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
 extern struct iscsi_cmd *iscsit_find_cmd_from_itt_or_dump(struct iscsi_conn *,
                        itt_t, u32);
 extern struct iscsi_cmd *iscsit_find_cmd_from_ttt(struct iscsi_conn *, u32);
index d836de200a03bcf24be54004df89c7d6d5039030..44620fb6bd45e96eae5b13dac4b9582a0d86c85e 100644 (file)
@@ -494,6 +494,11 @@ fd_execute_write_same(struct se_cmd *cmd)
                target_complete_cmd(cmd, SAM_STAT_GOOD);
                return 0;
        }
+       if (cmd->prot_op) {
+               pr_err("WRITE_SAME: Protection information with FILEIO"
+                      " backends not supported\n");
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
        sg = &cmd->t_data_sg[0];
 
        if (cmd->t_data_nents > 1 ||
index 78346b850968ed8da28d88f35cf6a3ac15512a1b..d4a4b0fb444a12907fac835f6a3db03600b722fa 100644 (file)
@@ -464,6 +464,11 @@ iblock_execute_write_same(struct se_cmd *cmd)
        sector_t block_lba = cmd->t_task_lba;
        sector_t sectors = sbc_get_write_same_sectors(cmd);
 
+       if (cmd->prot_op) {
+               pr_err("WRITE_SAME: Protection information with IBLOCK"
+                      " backends not supported\n");
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
        sg = &cmd->t_data_sg[0];
 
        if (cmd->t_data_nents > 1 ||
index 283cf786ef98be3d0594e847cc9749a072986b80..2de6fb8cee8d83b8338472ee2a51dd02c078e833 100644 (file)
@@ -1874,8 +1874,8 @@ static int core_scsi3_update_aptpl_buf(
                }
 
                if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-                       pr_err("Unable to update renaming"
-                               " APTPL metadata\n");
+                       pr_err("Unable to update renaming APTPL metadata,"
+                              " reallocating larger buffer\n");
                        ret = -EMSGSIZE;
                        goto out;
                }
@@ -1892,8 +1892,8 @@ static int core_scsi3_update_aptpl_buf(
                        lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
 
                if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-                       pr_err("Unable to update renaming"
-                               " APTPL metadata\n");
+                       pr_err("Unable to update renaming APTPL metadata,"
+                              " reallocating larger buffer\n");
                        ret = -EMSGSIZE;
                        goto out;
                }
@@ -1956,7 +1956,7 @@ static int __core_scsi3_write_aptpl_to_file(
 static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, bool aptpl)
 {
        unsigned char *buf;
-       int rc;
+       int rc, len = PR_APTPL_BUF_LEN;
 
        if (!aptpl) {
                char *null_buf = "No Registrations or Reservations\n";
@@ -1970,25 +1970,26 @@ static sense_reason_t core_scsi3_update_and_write_aptpl(struct se_device *dev, b
 
                return 0;
        }
-
-       buf = kzalloc(PR_APTPL_BUF_LEN, GFP_KERNEL);
+retry:
+       buf = vzalloc(len);
        if (!buf)
                return TCM_OUT_OF_RESOURCES;
 
-       rc = core_scsi3_update_aptpl_buf(dev, buf, PR_APTPL_BUF_LEN);
+       rc = core_scsi3_update_aptpl_buf(dev, buf, len);
        if (rc < 0) {
-               kfree(buf);
-               return TCM_OUT_OF_RESOURCES;
+               vfree(buf);
+               len *= 2;
+               goto retry;
        }
 
        rc = __core_scsi3_write_aptpl_to_file(dev, buf);
        if (rc != 0) {
                pr_err("SPC-3 PR: Could not update APTPL\n");
-               kfree(buf);
+               vfree(buf);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
        dev->t10_pr.pr_aptpl_active = 1;
-       kfree(buf);
+       vfree(buf);
        pr_debug("SPC-3 PR: Set APTPL Bit Activated\n");
        return 0;
 }
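The APTPL change above switches the metadata buffer to vzalloc() and, instead of failing when core_scsi3_update_aptpl_buf() reports the buffer is too small, frees it, doubles the length and retries. A minimal userspace model of that grow-and-retry allocation pattern follows; the formatter, payload and names are illustrative stand-ins, not the kernel's.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Returns 0 on success, -1 if the buffer was too small
 * (mirrors core_scsi3_update_aptpl_buf() returning < 0). */
static int format_metadata(char *buf, size_t len, const char *payload)
{
    size_t need = strlen(payload) + 1;

    if (need > len)
        return -1;
    memcpy(buf, payload, need);
    return 0;
}

static char *build_metadata(const char *payload, size_t start_len)
{
    size_t len = start_len;
    char *buf;

retry:
    buf = calloc(1, len);       /* stands in for vzalloc() */
    if (!buf)
        return NULL;

    if (format_metadata(buf, len, payload) < 0) {
        free(buf);              /* stands in for vfree() */
        len *= 2;               /* grow and try again */
        goto retry;
    }
    return buf;
}

int main(void)
{
    char *buf = build_metadata("initiator=iqn.example reservation=...", 8);

    printf("%s\n", buf ? buf : "allocation failed");
    free(buf);
    return 0;
}
```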
index cd4bed7b27579b14a0f6e517eed23dacb6c1fe02..9a2f9d3a6e70514ad9351ab6e144709e8b95f91d 100644 (file)
@@ -36,6 +36,9 @@
 #include "target_core_ua.h"
 #include "target_core_alua.h"
 
+static sense_reason_t
+sbc_check_prot(struct se_device *, struct se_cmd *, unsigned char *, u32, bool);
+
 static sense_reason_t
 sbc_emulate_readcapacity(struct se_cmd *cmd)
 {
@@ -251,7 +254,10 @@ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb)
 static sense_reason_t
 sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *ops)
 {
+       struct se_device *dev = cmd->se_dev;
+       sector_t end_lba = dev->transport->get_blocks(dev) + 1;
        unsigned int sectors = sbc_get_write_same_sectors(cmd);
+       sense_reason_t ret;
 
        if ((flags[0] & 0x04) || (flags[0] & 0x02)) {
                pr_err("WRITE_SAME PBDATA and LBDATA"
@@ -264,6 +270,16 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
                        sectors, cmd->se_dev->dev_attrib.max_write_same_len);
                return TCM_INVALID_CDB_FIELD;
        }
+       /*
+        * Sanity check for LBA wrap and request past end of device.
+        */
+       if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+           ((cmd->t_task_lba + sectors) > end_lba)) {
+               pr_err("WRITE_SAME exceeds last lba %llu (lba %llu, sectors %u)\n",
+                      (unsigned long long)end_lba, cmd->t_task_lba, sectors);
+               return TCM_ADDRESS_OUT_OF_RANGE;
+       }
+
        /* We always have ANC_SUP == 0 so setting ANCHOR is always an error */
        if (flags[0] & 0x10) {
                pr_warn("WRITE SAME with ANCHOR not supported\n");
@@ -277,12 +293,21 @@ sbc_setup_write_same(struct se_cmd *cmd, unsigned char *flags, struct sbc_ops *o
                if (!ops->execute_write_same_unmap)
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+               if (!dev->dev_attrib.emulate_tpws) {
+                       pr_err("Got WRITE_SAME w/ UNMAP=1, but backend device"
+                              " has emulate_tpws disabled\n");
+                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               }
                cmd->execute_cmd = ops->execute_write_same_unmap;
                return 0;
        }
        if (!ops->execute_write_same)
                return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+       ret = sbc_check_prot(dev, cmd, &cmd->t_task_cdb[0], sectors, true);
+       if (ret)
+               return ret;
+
        cmd->execute_cmd = ops->execute_write_same;
        return 0;
 }
@@ -614,14 +639,21 @@ sbc_set_prot_op_checks(u8 protect, enum target_prot_type prot_type,
        return 0;
 }
 
-static bool
+static sense_reason_t
 sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
               u32 sectors, bool is_write)
 {
        u8 protect = cdb[1] >> 5;
 
-       if ((!cmd->t_prot_sg || !cmd->t_prot_nents) && cmd->prot_pto)
-               return true;
+       if (!cmd->t_prot_sg || !cmd->t_prot_nents) {
+               if (protect && !dev->dev_attrib.pi_prot_type) {
+                       pr_err("CDB contains protect bit, but device does not"
+                              " advertise PROTECT=1 feature bit\n");
+                       return TCM_INVALID_CDB_FIELD;
+               }
+               if (cmd->prot_pto)
+                       return TCM_NO_SENSE;
+       }
 
        switch (dev->dev_attrib.pi_prot_type) {
        case TARGET_DIF_TYPE3_PROT:
@@ -629,7 +661,7 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
                break;
        case TARGET_DIF_TYPE2_PROT:
                if (protect)
-                       return false;
+                       return TCM_INVALID_CDB_FIELD;
 
                cmd->reftag_seed = cmd->t_task_lba;
                break;
@@ -638,12 +670,12 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
                break;
        case TARGET_DIF_TYPE0_PROT:
        default:
-               return true;
+               return TCM_NO_SENSE;
        }
 
        if (sbc_set_prot_op_checks(protect, dev->dev_attrib.pi_prot_type,
                                   is_write, cmd))
-               return false;
+               return TCM_INVALID_CDB_FIELD;
 
        cmd->prot_type = dev->dev_attrib.pi_prot_type;
        cmd->prot_length = dev->prot_length * sectors;
@@ -662,7 +694,30 @@ sbc_check_prot(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb,
                 __func__, cmd->prot_type, cmd->data_length, cmd->prot_length,
                 cmd->prot_op, cmd->prot_checks);
 
-       return true;
+       return TCM_NO_SENSE;
+}
+
+static int
+sbc_check_dpofua(struct se_device *dev, struct se_cmd *cmd, unsigned char *cdb)
+{
+       if (cdb[1] & 0x10) {
+               if (!dev->dev_attrib.emulate_dpo) {
+                       pr_err("Got CDB: 0x%02x with DPO bit set, but device"
+                              " does not advertise support for DPO\n", cdb[0]);
+                       return -EINVAL;
+               }
+       }
+       if (cdb[1] & 0x8) {
+               if (!dev->dev_attrib.emulate_fua_write ||
+                   !dev->dev_attrib.emulate_write_cache) {
+                       pr_err("Got CDB: 0x%02x with FUA bit set, but device"
+                              " does not advertise support for FUA write\n",
+                              cdb[0]);
+                       return -EINVAL;
+               }
+               cmd->se_cmd_flags |= SCF_FUA;
+       }
+       return 0;
 }
 
 sense_reason_t
@@ -686,8 +741,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_10(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+               if (ret)
+                       return ret;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
@@ -697,8 +756,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_12(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+               if (ret)
+                       return ret;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
@@ -708,8 +771,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_16(cdb);
                cmd->t_task_lba = transport_lba_64(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, false))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, false);
+               if (ret)
+                       return ret;
 
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
@@ -727,11 +794,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_10(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+               if (ret)
+                       return ret;
 
-               if (cdb[1] & 0x8)
-                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
@@ -740,11 +809,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_12(cdb);
                cmd->t_task_lba = transport_lba_32(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+               if (ret)
+                       return ret;
 
-               if (cdb[1] & 0x8)
-                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
@@ -753,11 +824,13 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                sectors = transport_get_sectors_16(cdb);
                cmd->t_task_lba = transport_lba_64(cdb);
 
-               if (!sbc_check_prot(dev, cmd, cdb, sectors, true))
-                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
+               ret = sbc_check_prot(dev, cmd, cdb, sectors, true);
+               if (ret)
+                       return ret;
 
-               if (cdb[1] & 0x8)
-                       cmd->se_cmd_flags |= SCF_FUA;
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
@@ -768,6 +841,9 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        return TCM_INVALID_CDB_FIELD;
                sectors = transport_get_sectors_10(cdb);
 
+               if (sbc_check_dpofua(dev, cmd, cdb))
+                       return TCM_INVALID_CDB_FIELD;
+
                cmd->t_task_lba = transport_lba_32(cdb);
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
 
@@ -777,8 +853,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                cmd->execute_rw = ops->execute_rw;
                cmd->execute_cmd = sbc_execute_rw;
                cmd->transport_complete_callback = &xdreadwrite_callback;
-               if (cdb[1] & 0x8)
-                       cmd->se_cmd_flags |= SCF_FUA;
                break;
        case VARIABLE_LENGTH_CMD:
        {
@@ -787,6 +861,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                case XDWRITEREAD_32:
                        sectors = transport_get_sectors_32(cdb);
 
+                       if (sbc_check_dpofua(dev, cmd, cdb))
+                               return TCM_INVALID_CDB_FIELD;
                        /*
                         * Use WRITE_32 and READ_32 opcodes for the emulated
                         * XDWRITE_READ_32 logic.
@@ -801,8 +877,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                        cmd->execute_rw = ops->execute_rw;
                        cmd->execute_cmd = sbc_execute_rw;
                        cmd->transport_complete_callback = &xdreadwrite_callback;
-                       if (cdb[1] & 0x8)
-                               cmd->se_cmd_flags |= SCF_FUA;
                        break;
                case WRITE_SAME_32:
                        sectors = transport_get_sectors_32(cdb);
@@ -888,6 +962,11 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                if (!ops->execute_unmap)
                        return TCM_UNSUPPORTED_SCSI_OPCODE;
 
+               if (!dev->dev_attrib.emulate_tpu) {
+                       pr_err("Got UNMAP, but backend device has"
+                              " emulate_tpu disabled\n");
+                       return TCM_UNSUPPORTED_SCSI_OPCODE;
+               }
                size = get_unaligned_be16(&cdb[7]);
                cmd->execute_cmd = ops->execute_unmap;
                break;
@@ -955,7 +1034,8 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
                unsigned long long end_lba;
 check_lba:
                end_lba = dev->transport->get_blocks(dev) + 1;
-               if (cmd->t_task_lba + sectors > end_lba) {
+               if (((cmd->t_task_lba + sectors) < cmd->t_task_lba) ||
+                   ((cmd->t_task_lba + sectors) > end_lba)) {
                        pr_err("cmd exceeds last lba %llu "
                                "(lba %llu, sectors %u)\n",
                                end_lba, cmd->t_task_lba, sectors);
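The hardened check_lba path above (and the matching check added to sbc_setup_write_same) rejects two cases: a request that runs past the last addressable block, and one whose LBA plus sector count wraps around, which the old single comparison missed. A standalone sketch of that overflow-safe range check, with end_lba playing the role of dev->transport->get_blocks(dev) + 1:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Reject requests that wrap the 64-bit LBA space or run past the device. */
static bool lba_range_ok(uint64_t lba, uint32_t sectors, uint64_t end_lba)
{
    uint64_t last = lba + sectors;

    if (last < lba)             /* wrapped: the old check missed this case */
        return false;
    return last <= end_lba;
}

int main(void)
{
    uint64_t end_lba = 1000;    /* device exposing blocks 0..999 */

    printf("%d\n", lba_range_ok(990, 10, end_lba));            /* 1: exactly fits   */
    printf("%d\n", lba_range_ok(995, 10, end_lba));            /* 0: past the end   */
    printf("%d\n", lba_range_ok(UINT64_MAX - 4, 10, end_lba)); /* 0: wraps around   */
    return 0;
}
```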
index 4c71657da56ab3cdc96b5c1f8d784722f10e2c1d..460e9310947399661ce1fbd6ab4ed3e86b4a47e1 100644 (file)
@@ -647,7 +647,7 @@ spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
         * support the use of the WRITE SAME (16) command to unmap LBAs.
         */
        if (dev->dev_attrib.emulate_tpws != 0)
-               buf[5] |= 0x40;
+               buf[5] |= 0x40 | 0x20;
 
        return 0;
 }
index d4413698a85f9738d226d5f82793e9d3c6f46abe..ba77a34f659f180357d22ce98c602b77c83882f2 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_INT340X_THERMAL)  += int3400_thermal.o
+obj-$(CONFIG_INT340X_THERMAL)  += int340x_thermal_zone.o
 obj-$(CONFIG_INT340X_THERMAL)  += int3402_thermal.o
 obj-$(CONFIG_INT340X_THERMAL)  += int3403_thermal.o
 obj-$(CONFIG_INT340X_THERMAL)  += processor_thermal_device.o
index 65a98a97df071cdf343776bc1e959dc9808dbbc4..25d244cbbe8fda719658e35be8f2081ee0f5989f 100644 (file)
 
 enum int3400_thermal_uuid {
        INT3400_THERMAL_PASSIVE_1,
-       INT3400_THERMAL_PASSIVE_2,
        INT3400_THERMAL_ACTIVE,
        INT3400_THERMAL_CRITICAL,
-       INT3400_THERMAL_COOLING_MODE,
        INT3400_THERMAL_MAXIMUM_UUID,
 };
 
 static u8 *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
        "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
-       "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
        "3A95C389-E4B8-4629-A526-C52C88626BAE",
        "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
-       "16CAF1B7-DD38-40ed-B1C1-1B8A1913D531",
 };
 
 struct int3400_thermal_priv {
index c5cbc3af3a0539260218492bc5aaa6199d8d5307..69df3d960303170070d3a35f2037602129fbb613 100644 (file)
 #include <linux/platform_device.h>
 #include <linux/acpi.h>
 #include <linux/thermal.h>
+#include "int340x_thermal_zone.h"
 
-#define ACPI_ACTIVE_COOLING_MAX_NR 10
-
-struct active_trip {
-       unsigned long temp;
-       int id;
-       bool valid;
-};
+#define INT3402_PERF_CHANGED_EVENT     0x80
+#define INT3402_THERMAL_EVENT          0x90
 
 struct int3402_thermal_data {
-       unsigned long *aux_trips;
-       int aux_trip_nr;
-       unsigned long psv_temp;
-       int psv_trip_id;
-       unsigned long crt_temp;
-       int crt_trip_id;
-       unsigned long hot_temp;
-       int hot_trip_id;
-       struct active_trip act_trips[ACPI_ACTIVE_COOLING_MAX_NR];
        acpi_handle *handle;
+       struct int34x_thermal_zone *int340x_zone;
 };
 
-static int int3402_thermal_get_zone_temp(struct thermal_zone_device *zone,
-                                        unsigned long *temp)
-{
-       struct int3402_thermal_data *d = zone->devdata;
-       unsigned long long tmp;
-       acpi_status status;
-
-       status = acpi_evaluate_integer(d->handle, "_TMP", NULL, &tmp);
-       if (ACPI_FAILURE(status))
-               return -ENODEV;
-
-       /* _TMP returns the temperature in tenths of degrees Kelvin */
-       *temp = DECI_KELVIN_TO_MILLICELSIUS(tmp);
-
-       return 0;
-}
-
-static int int3402_thermal_get_trip_temp(struct thermal_zone_device *zone,
-                                        int trip, unsigned long *temp)
+static void int3402_notify(acpi_handle handle, u32 event, void *data)
 {
-       struct int3402_thermal_data *d = zone->devdata;
-       int i;
-
-       if (trip < d->aux_trip_nr)
-               *temp = d->aux_trips[trip];
-       else if (trip == d->crt_trip_id)
-               *temp = d->crt_temp;
-       else if (trip == d->psv_trip_id)
-               *temp = d->psv_temp;
-       else if (trip == d->hot_trip_id)
-               *temp = d->hot_temp;
-       else {
-               for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
-                       if (d->act_trips[i].valid &&
-                           d->act_trips[i].id == trip) {
-                               *temp = d->act_trips[i].temp;
-                               break;
-                       }
-               }
-               if (i == ACPI_ACTIVE_COOLING_MAX_NR)
-                       return -EINVAL;
+       struct int3402_thermal_data *priv = data;
+
+       if (!priv)
+               return;
+
+       switch (event) {
+       case INT3402_PERF_CHANGED_EVENT:
+               break;
+       case INT3402_THERMAL_EVENT:
+               int340x_thermal_zone_device_update(priv->int340x_zone);
+               break;
+       default:
+               break;
        }
-       return 0;
-}
-
-static int int3402_thermal_get_trip_type(struct thermal_zone_device *zone,
-                                        int trip, enum thermal_trip_type *type)
-{
-       struct int3402_thermal_data *d = zone->devdata;
-       int i;
-
-       if (trip < d->aux_trip_nr)
-               *type = THERMAL_TRIP_PASSIVE;
-       else if (trip == d->crt_trip_id)
-               *type = THERMAL_TRIP_CRITICAL;
-       else if (trip == d->hot_trip_id)
-               *type = THERMAL_TRIP_HOT;
-       else if (trip == d->psv_trip_id)
-               *type = THERMAL_TRIP_PASSIVE;
-       else {
-               for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
-                       if (d->act_trips[i].valid &&
-                           d->act_trips[i].id == trip) {
-                               *type = THERMAL_TRIP_ACTIVE;
-                               break;
-                       }
-               }
-               if (i == ACPI_ACTIVE_COOLING_MAX_NR)
-                       return -EINVAL;
-       }
-       return 0;
-}
-
-static int int3402_thermal_set_trip_temp(struct thermal_zone_device *zone, int trip,
-                                 unsigned long temp)
-{
-       struct int3402_thermal_data *d = zone->devdata;
-       acpi_status status;
-       char name[10];
-
-       snprintf(name, sizeof(name), "PAT%d", trip);
-       status = acpi_execute_simple_method(d->handle, name,
-                       MILLICELSIUS_TO_DECI_KELVIN(temp));
-       if (ACPI_FAILURE(status))
-               return -EIO;
-
-       d->aux_trips[trip] = temp;
-       return 0;
-}
-
-static struct thermal_zone_device_ops int3402_thermal_zone_ops = {
-       .get_temp       = int3402_thermal_get_zone_temp,
-       .get_trip_temp  = int3402_thermal_get_trip_temp,
-       .get_trip_type  = int3402_thermal_get_trip_type,
-       .set_trip_temp  = int3402_thermal_set_trip_temp,
-};
-
-static struct thermal_zone_params int3402_thermal_params = {
-       .governor_name = "user_space",
-       .no_hwmon = true,
-};
-
-static int int3402_thermal_get_temp(acpi_handle handle, char *name,
-                                   unsigned long *temp)
-{
-       unsigned long long r;
-       acpi_status status;
-
-       status = acpi_evaluate_integer(handle, name, NULL, &r);
-       if (ACPI_FAILURE(status))
-               return -EIO;
-
-       *temp = DECI_KELVIN_TO_MILLICELSIUS(r);
-       return 0;
 }
 
 static int int3402_thermal_probe(struct platform_device *pdev)
 {
        struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
        struct int3402_thermal_data *d;
-       struct thermal_zone_device *zone;
-       acpi_status status;
-       unsigned long long trip_cnt;
-       int trip_mask = 0, i;
+       int ret;
 
        if (!acpi_has_method(adev->handle, "_TMP"))
                return -ENODEV;
@@ -168,54 +55,33 @@ static int int3402_thermal_probe(struct platform_device *pdev)
        if (!d)
                return -ENOMEM;
 
-       status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt);
-       if (ACPI_FAILURE(status))
-               trip_cnt = 0;
-       else {
-               d->aux_trips = devm_kzalloc(&pdev->dev,
-                               sizeof(*d->aux_trips) * trip_cnt, GFP_KERNEL);
-               if (!d->aux_trips)
-                       return -ENOMEM;
-               trip_mask = trip_cnt - 1;
-               d->handle = adev->handle;
-               d->aux_trip_nr = trip_cnt;
-       }
-
-       d->crt_trip_id = -1;
-       if (!int3402_thermal_get_temp(adev->handle, "_CRT", &d->crt_temp))
-               d->crt_trip_id = trip_cnt++;
-       d->hot_trip_id = -1;
-       if (!int3402_thermal_get_temp(adev->handle, "_HOT", &d->hot_temp))
-               d->hot_trip_id = trip_cnt++;
-       d->psv_trip_id = -1;
-       if (!int3402_thermal_get_temp(adev->handle, "_PSV", &d->psv_temp))
-               d->psv_trip_id = trip_cnt++;
-       for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
-               char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
-               if (int3402_thermal_get_temp(adev->handle, name,
-                                            &d->act_trips[i].temp))
-                       break;
-               d->act_trips[i].id = trip_cnt++;
-               d->act_trips[i].valid = true;
+       d->int340x_zone = int340x_thermal_zone_add(adev, NULL);
+       if (IS_ERR(d->int340x_zone))
+               return PTR_ERR(d->int340x_zone);
+
+       ret = acpi_install_notify_handler(adev->handle,
+                                         ACPI_DEVICE_NOTIFY,
+                                         int3402_notify,
+                                         d);
+       if (ret) {
+               int340x_thermal_zone_remove(d->int340x_zone);
+               return ret;
        }
 
-       zone = thermal_zone_device_register(acpi_device_bid(adev), trip_cnt,
-                                           trip_mask, d,
-                                           &int3402_thermal_zone_ops,
-                                           &int3402_thermal_params,
-                                           0, 0);
-       if (IS_ERR(zone))
-               return PTR_ERR(zone);
-       platform_set_drvdata(pdev, zone);
+       d->handle = adev->handle;
+       platform_set_drvdata(pdev, d);
 
        return 0;
 }
 
 static int int3402_thermal_remove(struct platform_device *pdev)
 {
-       struct thermal_zone_device *zone = platform_get_drvdata(pdev);
+       struct int3402_thermal_data *d = platform_get_drvdata(pdev);
+
+       acpi_remove_notify_handler(d->handle,
+                                  ACPI_DEVICE_NOTIFY, int3402_notify);
+       int340x_thermal_zone_remove(d->int340x_zone);
 
-       thermal_zone_device_unregister(zone);
        return 0;
 }
 
index 0faf500d8a77874d7c1b6c8a1b3e1195fc9e8065..50a7a08e3a15ee1e351953bd5a9b8308d8c9602f 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/acpi.h>
 #include <linux/thermal.h>
 #include <linux/platform_device.h>
+#include "int340x_thermal_zone.h"
 
 #define INT3403_TYPE_SENSOR            0x03
 #define INT3403_TYPE_CHARGER           0x0B
 #define INT3403_PERF_CHANGED_EVENT     0x80
 #define INT3403_THERMAL_EVENT          0x90
 
-#define DECI_KELVIN_TO_MILLI_CELSIUS(t, off) (((t) - (off)) * 100)
-#define KELVIN_OFFSET  2732
-#define MILLI_CELSIUS_TO_DECI_KELVIN(t, off) (((t) / 100) + (off))
-
+/* Preserved structure for future expandability */
 struct int3403_sensor {
-       struct thermal_zone_device *tzone;
-       unsigned long *thresholds;
-       unsigned long   crit_temp;
-       int             crit_trip_id;
-       unsigned long   psv_temp;
-       int             psv_trip_id;
-
+       struct int34x_thermal_zone *int340x_zone;
 };
 
 struct int3403_performance_state {
@@ -63,126 +55,6 @@ struct int3403_priv {
        void *priv;
 };
 
-static int sys_get_curr_temp(struct thermal_zone_device *tzone,
-                               unsigned long *temp)
-{
-       struct int3403_priv *priv = tzone->devdata;
-       struct acpi_device *device = priv->adev;
-       unsigned long long tmp;
-       acpi_status status;
-
-       status = acpi_evaluate_integer(device->handle, "_TMP", NULL, &tmp);
-       if (ACPI_FAILURE(status))
-               return -EIO;
-
-       *temp = DECI_KELVIN_TO_MILLI_CELSIUS(tmp, KELVIN_OFFSET);
-
-       return 0;
-}
-
-static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
-               int trip, unsigned long *temp)
-{
-       struct int3403_priv *priv = tzone->devdata;
-       struct acpi_device *device = priv->adev;
-       unsigned long long hyst;
-       acpi_status status;
-
-       status = acpi_evaluate_integer(device->handle, "GTSH", NULL, &hyst);
-       if (ACPI_FAILURE(status))
-               return -EIO;
-
-       /*
-        * Thermal hysteresis represents a temperature difference.
-        * Kelvin and Celsius have same degree size. So the
-        * conversion here between tenths of degree Kelvin unit
-        * and Milli-Celsius unit is just to multiply 100.
-        */
-       *temp = hyst * 100;
-
-       return 0;
-}
-
-static int sys_get_trip_temp(struct thermal_zone_device *tzone,
-               int trip, unsigned long *temp)
-{
-       struct int3403_priv *priv = tzone->devdata;
-       struct int3403_sensor *obj = priv->priv;
-
-       if (priv->type != INT3403_TYPE_SENSOR || !obj)
-               return -EINVAL;
-
-       if (trip == obj->crit_trip_id)
-               *temp = obj->crit_temp;
-       else if (trip == obj->psv_trip_id)
-               *temp = obj->psv_temp;
-       else {
-               /*
-                * get_trip_temp is a mandatory callback but
-                * PATx method doesn't return any value, so return
-                * cached value, which was last set from user space
-                */
-               *temp = obj->thresholds[trip];
-       }
-
-       return 0;
-}
-
-static int sys_get_trip_type(struct thermal_zone_device *thermal,
-               int trip, enum thermal_trip_type *type)
-{
-       struct int3403_priv *priv = thermal->devdata;
-       struct int3403_sensor *obj = priv->priv;
-
-       /* Mandatory callback, may not mean much here */
-       if (trip == obj->crit_trip_id)
-               *type = THERMAL_TRIP_CRITICAL;
-       else
-               *type = THERMAL_TRIP_PASSIVE;
-
-       return 0;
-}
-
-int sys_set_trip_temp(struct thermal_zone_device *tzone, int trip,
-                                                       unsigned long temp)
-{
-       struct int3403_priv *priv = tzone->devdata;
-       struct acpi_device *device = priv->adev;
-       struct int3403_sensor *obj = priv->priv;
-       acpi_status status;
-       char name[10];
-       int ret = 0;
-
-       snprintf(name, sizeof(name), "PAT%d", trip);
-       if (acpi_has_method(device->handle, name)) {
-               status = acpi_execute_simple_method(device->handle, name,
-                               MILLI_CELSIUS_TO_DECI_KELVIN(temp,
-                                                       KELVIN_OFFSET));
-               if (ACPI_FAILURE(status))
-                       ret = -EIO;
-               else
-                       obj->thresholds[trip] = temp;
-       } else {
-               ret = -EIO;
-               dev_err(&device->dev, "sys_set_trip_temp: method not found\n");
-       }
-
-       return ret;
-}
-
-static struct thermal_zone_device_ops tzone_ops = {
-       .get_temp = sys_get_curr_temp,
-       .get_trip_temp = sys_get_trip_temp,
-       .get_trip_type = sys_get_trip_type,
-       .set_trip_temp = sys_set_trip_temp,
-       .get_trip_hyst =  sys_get_trip_hyst,
-};
-
-static struct thermal_zone_params int3403_thermal_params = {
-       .governor_name = "user_space",
-       .no_hwmon = true,
-};
-
 static void int3403_notify(acpi_handle handle,
                u32 event, void *data)
 {
@@ -200,7 +72,7 @@ static void int3403_notify(acpi_handle handle,
        case INT3403_PERF_CHANGED_EVENT:
                break;
        case INT3403_THERMAL_EVENT:
-               thermal_zone_device_update(obj->tzone);
+               int340x_thermal_zone_device_update(obj->int340x_zone);
                break;
        default:
                dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event);
@@ -208,41 +80,10 @@ static void int3403_notify(acpi_handle handle,
        }
 }
 
-static int sys_get_trip_crt(struct acpi_device *device, unsigned long *temp)
-{
-       unsigned long long crt;
-       acpi_status status;
-
-       status = acpi_evaluate_integer(device->handle, "_CRT", NULL, &crt);
-       if (ACPI_FAILURE(status))
-               return -EIO;
-
-       *temp = DECI_KELVIN_TO_MILLI_CELSIUS(crt, KELVIN_OFFSET);
-
-       return 0;
-}
-
-static int sys_get_trip_psv(struct acpi_device *device, unsigned long *temp)
-{
-       unsigned long long psv;
-       acpi_status status;
-
-       status = acpi_evaluate_integer(device->handle, "_PSV", NULL, &psv);
-       if (ACPI_FAILURE(status))
-               return -EIO;
-
-       *temp = DECI_KELVIN_TO_MILLI_CELSIUS(psv, KELVIN_OFFSET);
-
-       return 0;
-}
-
 static int int3403_sensor_add(struct int3403_priv *priv)
 {
        int result = 0;
-       acpi_status status;
        struct int3403_sensor *obj;
-       unsigned long long trip_cnt;
-       int trip_mask = 0;
 
        obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL);
        if (!obj)
@@ -250,39 +91,9 @@ static int int3403_sensor_add(struct int3403_priv *priv)
 
        priv->priv = obj;
 
-       status = acpi_evaluate_integer(priv->adev->handle, "PATC", NULL,
-                                               &trip_cnt);
-       if (ACPI_FAILURE(status))
-               trip_cnt = 0;
-
-       if (trip_cnt) {
-               /* We have to cache, thresholds can't be readback */
-               obj->thresholds = devm_kzalloc(&priv->pdev->dev,
-                                       sizeof(*obj->thresholds) * trip_cnt,
-                                       GFP_KERNEL);
-               if (!obj->thresholds) {
-                       result = -ENOMEM;
-                       goto err_free_obj;
-               }
-               trip_mask = BIT(trip_cnt) - 1;
-       }
-
-       obj->psv_trip_id = -1;
-       if (!sys_get_trip_psv(priv->adev, &obj->psv_temp))
-               obj->psv_trip_id = trip_cnt++;
-
-       obj->crit_trip_id = -1;
-       if (!sys_get_trip_crt(priv->adev, &obj->crit_temp))
-               obj->crit_trip_id = trip_cnt++;
-
-       obj->tzone = thermal_zone_device_register(acpi_device_bid(priv->adev),
-                               trip_cnt, trip_mask, priv, &tzone_ops,
-                               &int3403_thermal_params, 0, 0);
-       if (IS_ERR(obj->tzone)) {
-               result = PTR_ERR(obj->tzone);
-               obj->tzone = NULL;
-               goto err_free_obj;
-       }
+       obj->int340x_zone = int340x_thermal_zone_add(priv->adev, NULL);
+       if (IS_ERR(obj->int340x_zone))
+               return PTR_ERR(obj->int340x_zone);
 
        result = acpi_install_notify_handler(priv->adev->handle,
                        ACPI_DEVICE_NOTIFY, int3403_notify,
@@ -293,7 +104,7 @@ static int int3403_sensor_add(struct int3403_priv *priv)
        return 0;
 
  err_free_obj:
-       thermal_zone_device_unregister(obj->tzone);
+       int340x_thermal_zone_remove(obj->int340x_zone);
        return result;
 }
 
@@ -303,7 +114,8 @@ static int int3403_sensor_remove(struct int3403_priv *priv)
 
        acpi_remove_notify_handler(priv->adev->handle,
                                   ACPI_DEVICE_NOTIFY, int3403_notify);
-       thermal_zone_device_unregister(obj->tzone);
+       int340x_thermal_zone_remove(obj->int340x_zone);
+
        return 0;
 }
 
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.c b/drivers/thermal/int340x_thermal/int340x_thermal_zone.c
new file mode 100644 (file)
index 0000000..f88b088
--- /dev/null
@@ -0,0 +1,276 @@
+/*
+ * int340x_thermal_zone.c
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/thermal.h>
+#include "int340x_thermal_zone.h"
+
+static int int340x_thermal_get_zone_temp(struct thermal_zone_device *zone,
+                                        unsigned long *temp)
+{
+       struct int34x_thermal_zone *d = zone->devdata;
+       unsigned long long tmp;
+       acpi_status status;
+
+       if (d->override_ops && d->override_ops->get_temp)
+               return d->override_ops->get_temp(zone, temp);
+
+       status = acpi_evaluate_integer(d->adev->handle, "_TMP", NULL, &tmp);
+       if (ACPI_FAILURE(status))
+               return -EIO;
+
+       if (d->lpat_table) {
+               int conv_temp;
+
+               conv_temp = acpi_lpat_raw_to_temp(d->lpat_table, (int)tmp);
+               if (conv_temp < 0)
+                       return conv_temp;
+
+               *temp = (unsigned long)conv_temp * 10;
+       } else
+               /* _TMP returns the temperature in tenths of degrees Kelvin */
+               *temp = DECI_KELVIN_TO_MILLICELSIUS(tmp);
+
+       return 0;
+}
+
+static int int340x_thermal_get_trip_temp(struct thermal_zone_device *zone,
+                                        int trip, unsigned long *temp)
+{
+       struct int34x_thermal_zone *d = zone->devdata;
+       int i;
+
+       if (d->override_ops && d->override_ops->get_trip_temp)
+               return d->override_ops->get_trip_temp(zone, trip, temp);
+
+       if (trip < d->aux_trip_nr)
+               *temp = d->aux_trips[trip];
+       else if (trip == d->crt_trip_id)
+               *temp = d->crt_temp;
+       else if (trip == d->psv_trip_id)
+               *temp = d->psv_temp;
+       else if (trip == d->hot_trip_id)
+               *temp = d->hot_temp;
+       else {
+               for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
+                       if (d->act_trips[i].valid &&
+                           d->act_trips[i].id == trip) {
+                               *temp = d->act_trips[i].temp;
+                               break;
+                       }
+               }
+               if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int int340x_thermal_get_trip_type(struct thermal_zone_device *zone,
+                                        int trip,
+                                        enum thermal_trip_type *type)
+{
+       struct int34x_thermal_zone *d = zone->devdata;
+       int i;
+
+       if (d->override_ops && d->override_ops->get_trip_type)
+               return d->override_ops->get_trip_type(zone, trip, type);
+
+       if (trip < d->aux_trip_nr)
+               *type = THERMAL_TRIP_PASSIVE;
+       else if (trip == d->crt_trip_id)
+               *type = THERMAL_TRIP_CRITICAL;
+       else if (trip == d->hot_trip_id)
+               *type = THERMAL_TRIP_HOT;
+       else if (trip == d->psv_trip_id)
+               *type = THERMAL_TRIP_PASSIVE;
+       else {
+               for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
+                       if (d->act_trips[i].valid &&
+                           d->act_trips[i].id == trip) {
+                               *type = THERMAL_TRIP_ACTIVE;
+                               break;
+                       }
+               }
+               if (i == INT340X_THERMAL_MAX_ACT_TRIP_COUNT)
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int int340x_thermal_set_trip_temp(struct thermal_zone_device *zone,
+                                     int trip, unsigned long temp)
+{
+       struct int34x_thermal_zone *d = zone->devdata;
+       acpi_status status;
+       char name[10];
+
+       if (d->override_ops && d->override_ops->set_trip_temp)
+               return d->override_ops->set_trip_temp(zone, trip, temp);
+
+       snprintf(name, sizeof(name), "PAT%d", trip);
+       status = acpi_execute_simple_method(d->adev->handle, name,
+                       MILLICELSIUS_TO_DECI_KELVIN(temp));
+       if (ACPI_FAILURE(status))
+               return -EIO;
+
+       d->aux_trips[trip] = temp;
+
+       return 0;
+}
+
+
+static int int340x_thermal_get_trip_hyst(struct thermal_zone_device *zone,
+               int trip, unsigned long *temp)
+{
+       struct int34x_thermal_zone *d = zone->devdata;
+       acpi_status status;
+       unsigned long long hyst;
+
+       if (d->override_ops && d->override_ops->get_trip_hyst)
+               return d->override_ops->get_trip_hyst(zone, trip, temp);
+
+       status = acpi_evaluate_integer(d->adev->handle, "GTSH", NULL, &hyst);
+       if (ACPI_FAILURE(status))
+               return -EIO;
+
+       *temp = hyst * 100;
+
+       return 0;
+}
+
+static struct thermal_zone_device_ops int340x_thermal_zone_ops = {
+       .get_temp       = int340x_thermal_get_zone_temp,
+       .get_trip_temp  = int340x_thermal_get_trip_temp,
+       .get_trip_type  = int340x_thermal_get_trip_type,
+       .set_trip_temp  = int340x_thermal_set_trip_temp,
+       .get_trip_hyst =  int340x_thermal_get_trip_hyst,
+};
+
+static int int340x_thermal_get_trip_config(acpi_handle handle, char *name,
+                                     unsigned long *temp)
+{
+       unsigned long long r;
+       acpi_status status;
+
+       status = acpi_evaluate_integer(handle, name, NULL, &r);
+       if (ACPI_FAILURE(status))
+               return -EIO;
+
+       *temp = DECI_KELVIN_TO_MILLICELSIUS(r);
+
+       return 0;
+}
+
+static struct thermal_zone_params int340x_thermal_params = {
+       .governor_name = "user_space",
+       .no_hwmon = true,
+};
+
+struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *adev,
+                               struct thermal_zone_device_ops *override_ops)
+{
+       struct int34x_thermal_zone *int34x_thermal_zone;
+       acpi_status status;
+       unsigned long long trip_cnt;
+       int trip_mask = 0, i;
+       int ret;
+
+       int34x_thermal_zone = kzalloc(sizeof(*int34x_thermal_zone),
+                                     GFP_KERNEL);
+       if (!int34x_thermal_zone)
+               return ERR_PTR(-ENOMEM);
+
+       int34x_thermal_zone->adev = adev;
+       int34x_thermal_zone->override_ops = override_ops;
+
+       status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt);
+       if (ACPI_FAILURE(status))
+               trip_cnt = 0;
+       else {
+               int34x_thermal_zone->aux_trips = kzalloc(
+                               sizeof(*int34x_thermal_zone->aux_trips) *
+                               trip_cnt, GFP_KERNEL);
+               if (!int34x_thermal_zone->aux_trips) {
+                       ret = -ENOMEM;
+                       goto free_mem;
+               }
+               trip_mask = BIT(trip_cnt) - 1;
+               int34x_thermal_zone->aux_trip_nr = trip_cnt;
+       }
+
+       int34x_thermal_zone->crt_trip_id = -1;
+       if (!int340x_thermal_get_trip_config(adev->handle, "_CRT",
+                                            &int34x_thermal_zone->crt_temp))
+               int34x_thermal_zone->crt_trip_id = trip_cnt++;
+       int34x_thermal_zone->hot_trip_id = -1;
+       if (!int340x_thermal_get_trip_config(adev->handle, "_HOT",
+                                            &int34x_thermal_zone->hot_temp))
+               int34x_thermal_zone->hot_trip_id = trip_cnt++;
+       int34x_thermal_zone->psv_trip_id = -1;
+       if (!int340x_thermal_get_trip_config(adev->handle, "_PSV",
+                                            &int34x_thermal_zone->psv_temp))
+               int34x_thermal_zone->psv_trip_id = trip_cnt++;
+       for (i = 0; i < INT340X_THERMAL_MAX_ACT_TRIP_COUNT; i++) {
+               char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
+
+               if (int340x_thermal_get_trip_config(adev->handle, name,
+                               &int34x_thermal_zone->act_trips[i].temp))
+                       break;
+
+               int34x_thermal_zone->act_trips[i].id = trip_cnt++;
+               int34x_thermal_zone->act_trips[i].valid = true;
+       }
+       int34x_thermal_zone->lpat_table = acpi_lpat_get_conversion_table(
+                                                               adev->handle);
+
+       int34x_thermal_zone->zone = thermal_zone_device_register(
+                                               acpi_device_bid(adev),
+                                               trip_cnt,
+                                               trip_mask, int34x_thermal_zone,
+                                               &int340x_thermal_zone_ops,
+                                               &int340x_thermal_params,
+                                               0, 0);
+       if (IS_ERR(int34x_thermal_zone->zone)) {
+               ret = PTR_ERR(int34x_thermal_zone->zone);
+               goto free_lpat;
+       }
+
+       return int34x_thermal_zone;
+
+free_lpat:
+       acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+free_mem:
+       kfree(int34x_thermal_zone);
+       return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(int340x_thermal_zone_add);
+
+void int340x_thermal_zone_remove(struct int34x_thermal_zone
+                                *int34x_thermal_zone)
+{
+       thermal_zone_device_unregister(int34x_thermal_zone->zone);
+       acpi_lpat_free_conversion_table(int34x_thermal_zone->lpat_table);
+       kfree(int34x_thermal_zone);
+}
+EXPORT_SYMBOL_GPL(int340x_thermal_zone_remove);
+
+MODULE_AUTHOR("Aaron Lu <aaron.lu@intel.com>");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Intel INT340x common thermal zone handler");
+MODULE_LICENSE("GPL v2");
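To make the aux-trip bookkeeping in int340x_thermal_zone_add() above concrete, here is a small worked example; the PATC value is hypothetical and only illustrates the arithmetic, it is not taken from real firmware:

/*
 * Suppose the PATC object evaluates to 3 (three auxiliary trip points):
 *
 *     trip_mask   = BIT(3) - 1 = 0b111   -> trips 0..2 are marked writable
 *     aux_trip_nr = 3, and trip_cnt starts at 3
 *
 * Any _CRT/_HOT/_PSV/_ACx trips found afterwards are appended after the
 * aux trips (ids 3, 4, ...) and, since they are outside the mask, remain
 * read-only from user space.
 */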
diff --git a/drivers/thermal/int340x_thermal/int340x_thermal_zone.h b/drivers/thermal/int340x_thermal/int340x_thermal_zone.h
new file mode 100644 (file)
index 0000000..9f38ab7
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * int340x_thermal_zone.h
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __INT340X_THERMAL_ZONE_H__
+#define __INT340X_THERMAL_ZONE_H__
+
+#include <acpi/acpi_lpat.h>
+
+#define INT340X_THERMAL_MAX_ACT_TRIP_COUNT     10
+
+struct active_trip {
+       unsigned long temp;
+       int id;
+       bool valid;
+};
+
+struct int34x_thermal_zone {
+       struct acpi_device *adev;
+       struct active_trip act_trips[INT340X_THERMAL_MAX_ACT_TRIP_COUNT];
+       unsigned long *aux_trips;
+       int aux_trip_nr;
+       unsigned long psv_temp;
+       int psv_trip_id;
+       unsigned long crt_temp;
+       int crt_trip_id;
+       unsigned long hot_temp;
+       int hot_trip_id;
+       struct thermal_zone_device *zone;
+       struct thermal_zone_device_ops *override_ops;
+       void *priv_data;
+       struct acpi_lpat_conversion_table *lpat_table;
+};
+
+struct int34x_thermal_zone *int340x_thermal_zone_add(struct acpi_device *,
+                               struct thermal_zone_device_ops *override_ops);
+void int340x_thermal_zone_remove(struct int34x_thermal_zone *);
+
+static inline void int340x_thermal_zone_set_priv_data(
+                       struct int34x_thermal_zone *tzone, void *priv_data)
+{
+       tzone->priv_data = priv_data;
+}
+
+static inline void *int340x_thermal_zone_get_priv_data(
+                       struct int34x_thermal_zone *tzone)
+{
+       return tzone->priv_data;
+}
+
+static inline void int340x_thermal_zone_device_update(
+                       struct int34x_thermal_zone *tzone)
+{
+       thermal_zone_device_update(tzone->zone);
+}
+
+#endif
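As a usage illustration of the header above, a minimal sketch of how a client driver could register a zone through these helpers; the probe/remove scaffolding and the my_priv structure are assumptions made for the example, only the int340x_* calls come from int340x_thermal_zone.h:

/* Hypothetical client driver built on the INT340x zone helpers. */
struct my_priv {
	struct int34x_thermal_zone *zone;
};

static int my_probe(struct acpi_device *adev)
{
	struct my_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* NULL override_ops: rely on the common ACPI-based callbacks */
	priv->zone = int340x_thermal_zone_add(adev, NULL);
	if (IS_ERR(priv->zone)) {
		kfree(priv);
		return PTR_ERR(priv->zone);
	}

	/* Stash driver data so later callbacks can retrieve it */
	int340x_thermal_zone_set_priv_data(priv->zone, priv);
	return 0;
}

static void my_remove(struct my_priv *priv)
{
	int340x_thermal_zone_remove(priv->zone);
	kfree(priv);
}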
index 0fe5dbbea9687053b835ee12eae553930b5ce1f9..5e8d8e91ea6d9d6056c714b16a3eafd9d09f835e 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/acpi.h>
+#include <linux/thermal.h>
+#include "int340x_thermal_zone.h"
 
 /* Broadwell-U/HSB thermal reporting device */
 #define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603
@@ -39,6 +41,7 @@ struct proc_thermal_device {
        struct device *dev;
        struct acpi_device *adev;
        struct power_config power_limits[2];
+       struct int34x_thermal_zone *int340x_zone;
 };
 
 enum proc_thermal_emum_mode_type {
@@ -117,6 +120,72 @@ static struct attribute_group power_limit_attribute_group = {
        .name = "power_limits"
 };
 
+static int stored_tjmax; /* since it is fixed, we can have local storage */
+
+static int get_tjmax(void)
+{
+       u32 eax, edx;
+       u32 val;
+       int err;
+
+       err = rdmsr_safe(MSR_IA32_TEMPERATURE_TARGET, &eax, &edx);
+       if (err)
+               return err;
+
+       val = (eax >> 16) & 0xff;
+       if (val)
+               return val;
+
+       return -EINVAL;
+}
+
+static int read_temp_msr(unsigned long *temp)
+{
+       int cpu;
+       u32 eax, edx;
+       int err;
+       unsigned long curr_temp_off = 0;
+
+       *temp = 0;
+
+       for_each_online_cpu(cpu) {
+               err = rdmsr_safe_on_cpu(cpu, MSR_IA32_THERM_STATUS, &eax,
+                                       &edx);
+               if (err)
+                       goto err_ret;
+               else {
+                       if (eax & 0x80000000) {
+                               curr_temp_off = (eax >> 16) & 0x7f;
+                               if (!*temp || curr_temp_off < *temp)
+                                       *temp = curr_temp_off;
+                       } else {
+                               err = -EINVAL;
+                               goto err_ret;
+                       }
+               }
+       }
+
+       return 0;
+err_ret:
+       return err;
+}
+
+static int proc_thermal_get_zone_temp(struct thermal_zone_device *zone,
+                                        unsigned long *temp)
+{
+       int ret;
+
+       ret = read_temp_msr(temp);
+       if (!ret)
+               *temp = (stored_tjmax - *temp) * 1000;
+
+       return ret;
+}
+
+static struct thermal_zone_device_ops proc_thermal_local_ops = {
+       .get_temp       = proc_thermal_get_zone_temp,
+};
+
 static int proc_thermal_add(struct device *dev,
                            struct proc_thermal_device **priv)
 {
@@ -126,6 +195,8 @@ static int proc_thermal_add(struct device *dev,
        struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *elements, *ppcc;
        union acpi_object *p;
+       unsigned long long tmp;
+       struct thermal_zone_device_ops *ops = NULL;
        int i;
        int ret;
 
@@ -178,6 +249,24 @@ static int proc_thermal_add(struct device *dev,
 
        ret = sysfs_create_group(&dev->kobj,
                                 &power_limit_attribute_group);
+       if (ret)
+               goto free_buffer;
+
+       status = acpi_evaluate_integer(adev->handle, "_TMP", NULL, &tmp);
+       if (ACPI_FAILURE(status)) {
+               /* there is no _TMP method, add local method */
+               stored_tjmax = get_tjmax();
+               if (stored_tjmax > 0)
+                       ops = &proc_thermal_local_ops;
+       }
+
+       proc_priv->int340x_zone = int340x_thermal_zone_add(adev, ops);
+       if (IS_ERR(proc_priv->int340x_zone)) {
+               sysfs_remove_group(&proc_priv->dev->kobj,
+                          &power_limit_attribute_group);
+               ret = PTR_ERR(proc_priv->int340x_zone);
+       } else
+               ret = 0;
 
 free_buffer:
        kfree(buf.pointer);
@@ -185,8 +274,9 @@ free_buffer:
        return ret;
 }
 
-void proc_thermal_remove(struct proc_thermal_device *proc_priv)
+static void proc_thermal_remove(struct proc_thermal_device *proc_priv)
 {
+       int340x_thermal_zone_remove(proc_priv->int340x_zone);
        sysfs_remove_group(&proc_priv->dev->kobj,
                           &power_limit_attribute_group);
 }
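The fallback path added above derives the zone temperature from the cached package TjMax and the per-CPU digital thermal sensor readout when the firmware provides no _TMP method. A worked example with illustrative numbers (not measured values):

/*
 * With stored_tjmax = 100 (degrees C) and a smallest per-CPU DTS readout
 * of 30 (degrees below TjMax), proc_thermal_get_zone_temp() reports:
 *
 *     (100 - 30) * 1000 = 70000 millidegrees C, i.e. 70 C
 */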
index 5580f5b24eb9f59a76ac948c75fa56ca20376323..9013505e43b7358bb98c2b5f4e7b134ae0ce143d 100644 (file)
@@ -309,10 +309,13 @@ static int soc_dts_enable(int id)
        return ret;
 }
 
-static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max)
+static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max,
+                                             bool notification_support)
 {
        struct soc_sensor_entry *aux_entry;
        char name[10];
+       int trip_count = 0;
+       int trip_mask = 0;
        int err;
 
        aux_entry = kzalloc(sizeof(*aux_entry), GFP_KERNEL);
@@ -332,11 +335,16 @@ static struct soc_sensor_entry *alloc_soc_dts(int id, u32 tj_max)
        aux_entry->tj_max = tj_max;
        aux_entry->temp_mask = 0x00FF << (id * 8);
        aux_entry->temp_shift = id * 8;
+       if (notification_support) {
+               trip_count = SOC_MAX_DTS_TRIPS;
+               trip_mask = 0x02;
+       }
        snprintf(name, sizeof(name), "soc_dts%d", id);
        aux_entry->tzone = thermal_zone_device_register(name,
-                       SOC_MAX_DTS_TRIPS,
-                       0x02,
-                       aux_entry, &tzone_ops, NULL, 0, 0);
+                                                       trip_count,
+                                                       trip_mask,
+                                                       aux_entry, &tzone_ops,
+                                                       NULL, 0, 0);
        if (IS_ERR(aux_entry->tzone)) {
                err = PTR_ERR(aux_entry->tzone);
                goto err_ret;
@@ -402,6 +410,7 @@ static irqreturn_t soc_irq_thread_fn(int irq, void *dev_data)
 
 static const struct x86_cpu_id soc_thermal_ids[] = {
        { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x37, 0, BYT_SOC_DTS_APIC_IRQ},
+       { X86_VENDOR_INTEL, X86_FAMILY_ANY, 0x4c, 0, 0},
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, soc_thermal_ids);
@@ -420,8 +429,11 @@ static int __init intel_soc_thermal_init(void)
        if (get_tj_max(&tj_max))
                return -EINVAL;
 
+       soc_dts_thres_irq = (int)match_cpu->driver_data;
+
        for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
-               soc_dts[i] = alloc_soc_dts(i, tj_max);
+               soc_dts[i] = alloc_soc_dts(i, tj_max,
+                                          soc_dts_thres_irq ? true : false);
                if (IS_ERR(soc_dts[i])) {
                        err = PTR_ERR(soc_dts[i]);
                        goto err_free;
@@ -430,15 +442,15 @@ static int __init intel_soc_thermal_init(void)
 
        spin_lock_init(&intr_notify_lock);
 
-       soc_dts_thres_irq = (int)match_cpu->driver_data;
-
-       err = request_threaded_irq(soc_dts_thres_irq, NULL,
-                                       soc_irq_thread_fn,
-                                       IRQF_TRIGGER_RISING | IRQF_ONESHOT,
-                                       "soc_dts", soc_dts);
-       if (err) {
-               pr_err("request_threaded_irq ret %d\n", err);
-               goto err_free;
+       if (soc_dts_thres_irq) {
+               err = request_threaded_irq(soc_dts_thres_irq, NULL,
+                                          soc_irq_thread_fn,
+                                          IRQF_TRIGGER_RISING | IRQF_ONESHOT,
+                                          "soc_dts", soc_dts);
+               if (err) {
+                       pr_err("request_threaded_irq ret %d\n", err);
+                       goto err_free;
+               }
        }
 
        for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i) {
@@ -451,7 +463,8 @@ static int __init intel_soc_thermal_init(void)
 
 err_trip_temp:
        i = SOC_MAX_DTS_SENSORS;
-       free_irq(soc_dts_thres_irq, soc_dts);
+       if (soc_dts_thres_irq)
+               free_irq(soc_dts_thres_irq, soc_dts);
 err_free:
        while (--i >= 0)
                free_soc_dts(soc_dts[i]);
@@ -466,7 +479,8 @@ static void __exit intel_soc_thermal_exit(void)
        for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
                update_trip_temp(soc_dts[i], 0, 0);
 
-       free_irq(soc_dts_thres_irq, soc_dts);
+       if (soc_dts_thres_irq)
+               free_irq(soc_dts_thres_irq, soc_dts);
 
        for (i = 0; i < SOC_MAX_DTS_SENSORS; ++i)
                free_soc_dts(soc_dts[i]);
index d717f3dab6f1410fc955daefb0497c2096298b56..668fb1bdea9eff9b0443453430f1de92335c4104 100644 (file)
@@ -497,6 +497,9 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id, void *data,
                if (sensor_specs.np == sensor_np && id == sensor_id) {
                        tzd = thermal_zone_of_add_sensor(child, sensor_np,
                                                         data, ops);
+                       if (!IS_ERR(tzd))
+                               tzd->ops->set_mode(tzd, THERMAL_DEVICE_ENABLED);
+
                        of_node_put(sensor_specs.np);
                        of_node_put(child);
                        goto exit;
index 9c6ce548e36312f95ca49f6352cf4999a1ab0fe0..3aa46ac7cdbc33765a90279da09fd84507a09d6c 100644 (file)
@@ -193,19 +193,20 @@ static u32 rk_tsadcv2_temp_to_code(long temp)
 
 static long rk_tsadcv2_code_to_temp(u32 code)
 {
-       int high, low, mid;
-
-       low = 0;
-       high = ARRAY_SIZE(v2_code_table) - 1;
-       mid = (high + low) / 2;
-
-       if (code > v2_code_table[low].code || code < v2_code_table[high].code)
-               return 125000; /* No code available, return max temperature */
+       unsigned int low = 0;
+       unsigned int high = ARRAY_SIZE(v2_code_table) - 1;
+       unsigned int mid = (low + high) / 2;
+       unsigned int num;
+       unsigned long denom;
+
+       /* Invalid code, return -EAGAIN */
+       if (code > TSADCV2_DATA_MASK)
+               return -EAGAIN;
 
-       while (low <= high) {
-               if (code >= v2_code_table[mid].code && code <
-                   v2_code_table[mid - 1].code)
-                       return v2_code_table[mid].temp;
+       while (low <= high && mid) {
+               if (code >= v2_code_table[mid].code &&
+                   code < v2_code_table[mid - 1].code)
+                       break;
                else if (code < v2_code_table[mid].code)
                        low = mid + 1;
                else
@@ -213,7 +214,16 @@ static long rk_tsadcv2_code_to_temp(u32 code)
                mid = (low + high) / 2;
        }
 
-       return 125000;
+       /*
+        * The 5C granularity provided by the table is too much. Let's
+        * assume that the relationship between sensor readings and
+        * temperature between 2 table entries is linear and interpolate
+        * to produce a less granular result.
+        */
+       num = v2_code_table[mid].temp - v2_code_table[mid - 1].temp;
+       num *= v2_code_table[mid - 1].code - code;
+       denom = v2_code_table[mid - 1].code - v2_code_table[mid].code;
+       return v2_code_table[mid - 1].temp + (num / denom);
 }
 
 /**
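For clarity, a worked example of the interpolation introduced in rk_tsadcv2_code_to_temp() above, using two hypothetical adjacent code-table entries (values are illustrative, not taken from v2_code_table; codes decrease as temperature rises):

/*
 * Adjacent entries:  [mid-1] code 3452 -> 60000 mC,  [mid] code 3437 -> 65000 mC
 * For a raw reading code = 3444 the search breaks with mid on the hotter entry:
 *
 *     num   = (65000 - 60000) * (3452 - 3444) = 5000 * 8 = 40000
 *     denom =  3452 - 3437                    = 15
 *     temp  =  60000 + 40000 / 15             = 62666 mC (about 62.7 C)
 */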
index c43306ecc0abbb111dc4c6bfdda5201b64ae1738..c8e35c1a43dcfd19145a6d1e24b132b25b5c6169 100644 (file)
@@ -7,12 +7,3 @@ config EXYNOS_THERMAL
          the TMU, reports temperature and handles cooling action if defined.
          This driver uses the Exynos core thermal APIs and TMU configuration
          data from the supported SoCs.
-
-config EXYNOS_THERMAL_CORE
-       bool "Core thermal framework support for EXYNOS SOCs"
-       depends on EXYNOS_THERMAL
-       help
-         If you say yes here you get support for EXYNOS TMU
-         (Thermal Management Unit) common registration/unregistration
-         functions to the core thermal layer and also to use the generic
-         CPU cooling APIs.
index c09d83095dc2a754770a0bd4cd8c93935aa866e3..1e47d0d89ce06ed28c1202616345a058dd0f7386 100644 (file)
@@ -3,5 +3,3 @@
 #
 obj-$(CONFIG_EXYNOS_THERMAL)                   += exynos_thermal.o
 exynos_thermal-y                               := exynos_tmu.o
-exynos_thermal-y                               += exynos_tmu_data.o
-exynos_thermal-$(CONFIG_EXYNOS_THERMAL_CORE)   += exynos_thermal_common.o
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
deleted file mode 100644 (file)
index 6dc3815..0000000
+++ /dev/null
@@ -1,427 +0,0 @@
-/*
- * exynos_thermal_common.c - Samsung EXYNOS common thermal file
- *
- *  Copyright (C) 2013 Samsung Electronics
- *  Amit Daniel Kachhap <amit.daniel@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#include <linux/cpu_cooling.h>
-#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/thermal.h>
-
-#include "exynos_thermal_common.h"
-
-struct exynos_thermal_zone {
-       enum thermal_device_mode mode;
-       struct thermal_zone_device *therm_dev;
-       struct thermal_cooling_device *cool_dev[MAX_COOLING_DEVICE];
-       unsigned int cool_dev_size;
-       struct platform_device *exynos4_dev;
-       struct thermal_sensor_conf *sensor_conf;
-       bool bind;
-};
-
-/* Get mode callback functions for thermal zone */
-static int exynos_get_mode(struct thermal_zone_device *thermal,
-                       enum thermal_device_mode *mode)
-{
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       if (th_zone)
-               *mode = th_zone->mode;
-       return 0;
-}
-
-/* Set mode callback functions for thermal zone */
-static int exynos_set_mode(struct thermal_zone_device *thermal,
-                       enum thermal_device_mode mode)
-{
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       if (!th_zone) {
-               dev_err(&thermal->device,
-                       "thermal zone not registered\n");
-               return 0;
-       }
-
-       mutex_lock(&thermal->lock);
-
-       if (mode == THERMAL_DEVICE_ENABLED &&
-               !th_zone->sensor_conf->trip_data.trigger_falling)
-               thermal->polling_delay = IDLE_INTERVAL;
-       else
-               thermal->polling_delay = 0;
-
-       mutex_unlock(&thermal->lock);
-
-       th_zone->mode = mode;
-       thermal_zone_device_update(thermal);
-       dev_dbg(th_zone->sensor_conf->dev,
-               "thermal polling set for duration=%d msec\n",
-               thermal->polling_delay);
-       return 0;
-}
-
-
-/* Get trip type callback functions for thermal zone */
-static int exynos_get_trip_type(struct thermal_zone_device *thermal, int trip,
-                                enum thermal_trip_type *type)
-{
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       int max_trip = th_zone->sensor_conf->trip_data.trip_count;
-       int trip_type;
-
-       if (trip < 0 || trip >= max_trip)
-               return -EINVAL;
-
-       trip_type = th_zone->sensor_conf->trip_data.trip_type[trip];
-
-       if (trip_type == SW_TRIP)
-               *type = THERMAL_TRIP_CRITICAL;
-       else if (trip_type == THROTTLE_ACTIVE)
-               *type = THERMAL_TRIP_ACTIVE;
-       else if (trip_type == THROTTLE_PASSIVE)
-               *type = THERMAL_TRIP_PASSIVE;
-       else
-               return -EINVAL;
-
-       return 0;
-}
-
-/* Get trip temperature callback functions for thermal zone */
-static int exynos_get_trip_temp(struct thermal_zone_device *thermal, int trip,
-                               unsigned long *temp)
-{
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       int max_trip = th_zone->sensor_conf->trip_data.trip_count;
-
-       if (trip < 0 || trip >= max_trip)
-               return -EINVAL;
-
-       *temp = th_zone->sensor_conf->trip_data.trip_val[trip];
-       /* convert the temperature into millicelsius */
-       *temp = *temp * MCELSIUS;
-
-       return 0;
-}
-
-/* Get critical temperature callback functions for thermal zone */
-static int exynos_get_crit_temp(struct thermal_zone_device *thermal,
-                               unsigned long *temp)
-{
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       int max_trip = th_zone->sensor_conf->trip_data.trip_count;
-       /* Get the temp of highest trip*/
-       return exynos_get_trip_temp(thermal, max_trip - 1, temp);
-}
-
-/* Bind callback functions for thermal zone */
-static int exynos_bind(struct thermal_zone_device *thermal,
-                       struct thermal_cooling_device *cdev)
-{
-       int ret = 0, i, tab_size, level;
-       struct freq_clip_table *tab_ptr, *clip_data;
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       struct thermal_sensor_conf *data = th_zone->sensor_conf;
-
-       tab_ptr = (struct freq_clip_table *)data->cooling_data.freq_data;
-       tab_size = data->cooling_data.freq_clip_count;
-
-       if (tab_ptr == NULL || tab_size == 0)
-               return 0;
-
-       /* find the cooling device registered*/
-       for (i = 0; i < th_zone->cool_dev_size; i++)
-               if (cdev == th_zone->cool_dev[i])
-                       break;
-
-       /* No matching cooling device */
-       if (i == th_zone->cool_dev_size)
-               return 0;
-
-       /* Bind the thermal zone to the cpufreq cooling device */
-       for (i = 0; i < tab_size; i++) {
-               clip_data = (struct freq_clip_table *)&(tab_ptr[i]);
-               level = cpufreq_cooling_get_level(0, clip_data->freq_clip_max);
-               if (level == THERMAL_CSTATE_INVALID)
-                       return 0;
-               switch (GET_ZONE(i)) {
-               case MONITOR_ZONE:
-               case WARN_ZONE:
-                       if (thermal_zone_bind_cooling_device(thermal, i, cdev,
-                                                               level, 0)) {
-                               dev_err(data->dev,
-                                       "error unbinding cdev inst=%d\n", i);
-                               ret = -EINVAL;
-                       }
-                       th_zone->bind = true;
-                       break;
-               default:
-                       ret = -EINVAL;
-               }
-       }
-
-       return ret;
-}
-
-/* Unbind callback functions for thermal zone */
-static int exynos_unbind(struct thermal_zone_device *thermal,
-                       struct thermal_cooling_device *cdev)
-{
-       int ret = 0, i, tab_size;
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       struct thermal_sensor_conf *data = th_zone->sensor_conf;
-
-       if (th_zone->bind == false)
-               return 0;
-
-       tab_size = data->cooling_data.freq_clip_count;
-
-       if (tab_size == 0)
-               return 0;
-
-       /* find the cooling device registered*/
-       for (i = 0; i < th_zone->cool_dev_size; i++)
-               if (cdev == th_zone->cool_dev[i])
-                       break;
-
-       /* No matching cooling device */
-       if (i == th_zone->cool_dev_size)
-               return 0;
-
-       /* Bind the thermal zone to the cpufreq cooling device */
-       for (i = 0; i < tab_size; i++) {
-               switch (GET_ZONE(i)) {
-               case MONITOR_ZONE:
-               case WARN_ZONE:
-                       if (thermal_zone_unbind_cooling_device(thermal, i,
-                                                               cdev)) {
-                               dev_err(data->dev,
-                                       "error unbinding cdev inst=%d\n", i);
-                               ret = -EINVAL;
-                       }
-                       th_zone->bind = false;
-                       break;
-               default:
-                       ret = -EINVAL;
-               }
-       }
-       return ret;
-}
-
-/* Get temperature callback functions for thermal zone */
-static int exynos_get_temp(struct thermal_zone_device *thermal,
-                       unsigned long *temp)
-{
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-       void *data;
-
-       if (!th_zone->sensor_conf) {
-               dev_err(&thermal->device,
-                       "Temperature sensor not initialised\n");
-               return -EINVAL;
-       }
-       data = th_zone->sensor_conf->driver_data;
-       *temp = th_zone->sensor_conf->read_temperature(data);
-       /* convert the temperature into millicelsius */
-       *temp = *temp * MCELSIUS;
-       return 0;
-}
-
-/* Get temperature callback functions for thermal zone */
-static int exynos_set_emul_temp(struct thermal_zone_device *thermal,
-                                               unsigned long temp)
-{
-       void *data;
-       int ret = -EINVAL;
-       struct exynos_thermal_zone *th_zone = thermal->devdata;
-
-       if (!th_zone->sensor_conf) {
-               dev_err(&thermal->device,
-                       "Temperature sensor not initialised\n");
-               return -EINVAL;
-       }
-       data = th_zone->sensor_conf->driver_data;
-       if (th_zone->sensor_conf->write_emul_temp)
-               ret = th_zone->sensor_conf->write_emul_temp(data, temp);
-       return ret;
-}
-
-/* Get the temperature trend */
-static int exynos_get_trend(struct thermal_zone_device *thermal,
-                       int trip, enum thermal_trend *trend)
-{
-       int ret;
-       unsigned long trip_temp;
-
-       ret = exynos_get_trip_temp(thermal, trip, &trip_temp);
-       if (ret < 0)
-               return ret;
-
-       if (thermal->temperature >= trip_temp)
-               *trend = THERMAL_TREND_RAISE_FULL;
-       else
-               *trend = THERMAL_TREND_DROP_FULL;
-
-       return 0;
-}
-/* Operation callback functions for thermal zone */
-static struct thermal_zone_device_ops exynos_dev_ops = {
-       .bind = exynos_bind,
-       .unbind = exynos_unbind,
-       .get_temp = exynos_get_temp,
-       .set_emul_temp = exynos_set_emul_temp,
-       .get_trend = exynos_get_trend,
-       .get_mode = exynos_get_mode,
-       .set_mode = exynos_set_mode,
-       .get_trip_type = exynos_get_trip_type,
-       .get_trip_temp = exynos_get_trip_temp,
-       .get_crit_temp = exynos_get_crit_temp,
-};
-
-/*
- * This function may be called from interrupt based temperature sensor
- * when threshold is changed.
- */
-void exynos_report_trigger(struct thermal_sensor_conf *conf)
-{
-       unsigned int i;
-       char data[10];
-       char *envp[] = { data, NULL };
-       struct exynos_thermal_zone *th_zone;
-
-       if (!conf || !conf->pzone_data) {
-               pr_err("Invalid temperature sensor configuration data\n");
-               return;
-       }
-
-       th_zone = conf->pzone_data;
-
-       if (th_zone->bind == false) {
-               for (i = 0; i < th_zone->cool_dev_size; i++) {
-                       if (!th_zone->cool_dev[i])
-                               continue;
-                       exynos_bind(th_zone->therm_dev,
-                                       th_zone->cool_dev[i]);
-               }
-       }
-
-       thermal_zone_device_update(th_zone->therm_dev);
-
-       mutex_lock(&th_zone->therm_dev->lock);
-       /* Find the level for which trip happened */
-       for (i = 0; i < th_zone->sensor_conf->trip_data.trip_count; i++) {
-               if (th_zone->therm_dev->last_temperature <
-                       th_zone->sensor_conf->trip_data.trip_val[i] * MCELSIUS)
-                       break;
-       }
-
-       if (th_zone->mode == THERMAL_DEVICE_ENABLED &&
-               !th_zone->sensor_conf->trip_data.trigger_falling) {
-               if (i > 0)
-                       th_zone->therm_dev->polling_delay = ACTIVE_INTERVAL;
-               else
-                       th_zone->therm_dev->polling_delay = IDLE_INTERVAL;
-       }
-
-       snprintf(data, sizeof(data), "%u", i);
-       kobject_uevent_env(&th_zone->therm_dev->device.kobj, KOBJ_CHANGE, envp);
-       mutex_unlock(&th_zone->therm_dev->lock);
-}
-
-/* Register with the in-kernel thermal management */
-int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
-{
-       int ret;
-       struct exynos_thermal_zone *th_zone;
-
-       if (!sensor_conf || !sensor_conf->read_temperature) {
-               pr_err("Temperature sensor not initialised\n");
-               return -EINVAL;
-       }
-
-       th_zone = devm_kzalloc(sensor_conf->dev,
-                               sizeof(struct exynos_thermal_zone), GFP_KERNEL);
-       if (!th_zone)
-               return -ENOMEM;
-
-       th_zone->sensor_conf = sensor_conf;
-       /*
-        * TODO: 1) Handle multiple cooling devices in a thermal zone
-        *       2) Add a flag/name in cooling info to map to specific
-        *       sensor
-        */
-       if (sensor_conf->cooling_data.freq_clip_count > 0) {
-               th_zone->cool_dev[th_zone->cool_dev_size] =
-                               cpufreq_cooling_register(cpu_present_mask);
-               if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) {
-                       ret = PTR_ERR(th_zone->cool_dev[th_zone->cool_dev_size]);
-                       if (ret != -EPROBE_DEFER)
-                               dev_err(sensor_conf->dev,
-                                       "Failed to register cpufreq cooling device: %d\n",
-                                       ret);
-                       goto err_unregister;
-               }
-               th_zone->cool_dev_size++;
-       }
-
-       th_zone->therm_dev = thermal_zone_device_register(
-                       sensor_conf->name, sensor_conf->trip_data.trip_count,
-                       0, th_zone, &exynos_dev_ops, NULL, 0,
-                       sensor_conf->trip_data.trigger_falling ? 0 :
-                       IDLE_INTERVAL);
-
-       if (IS_ERR(th_zone->therm_dev)) {
-               dev_err(sensor_conf->dev,
-                       "Failed to register thermal zone device\n");
-               ret = PTR_ERR(th_zone->therm_dev);
-               goto err_unregister;
-       }
-       th_zone->mode = THERMAL_DEVICE_ENABLED;
-       sensor_conf->pzone_data = th_zone;
-
-       dev_info(sensor_conf->dev,
-               "Exynos: Thermal zone(%s) registered\n", sensor_conf->name);
-
-       return 0;
-
-err_unregister:
-       exynos_unregister_thermal(sensor_conf);
-       return ret;
-}
-
-/* Un-Register with the in-kernel thermal management */
-void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf)
-{
-       int i;
-       struct exynos_thermal_zone *th_zone;
-
-       if (!sensor_conf || !sensor_conf->pzone_data) {
-               pr_err("Invalid temperature sensor configuration data\n");
-               return;
-       }
-
-       th_zone = sensor_conf->pzone_data;
-
-       thermal_zone_device_unregister(th_zone->therm_dev);
-
-       for (i = 0; i < th_zone->cool_dev_size; ++i)
-               cpufreq_cooling_unregister(th_zone->cool_dev[i]);
-
-       dev_info(sensor_conf->dev,
-               "Exynos: Kernel Thermal management unregistered\n");
-}
diff --git a/drivers/thermal/samsung/exynos_thermal_common.h b/drivers/thermal/samsung/exynos_thermal_common.h
deleted file mode 100644 (file)
index cd44719..0000000
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
- * exynos_thermal_common.h - Samsung EXYNOS common header file
- *
- *  Copyright (C) 2013 Samsung Electronics
- *  Amit Daniel Kachhap <amit.daniel@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#ifndef _EXYNOS_THERMAL_COMMON_H
-#define _EXYNOS_THERMAL_COMMON_H
-
-/* In-kernel thermal framework related macros & definations */
-#define SENSOR_NAME_LEN        16
-#define MAX_TRIP_COUNT 8
-#define MAX_COOLING_DEVICE 4
-
-#define ACTIVE_INTERVAL 500
-#define IDLE_INTERVAL 10000
-#define MCELSIUS       1000
-
-/* CPU Zone information */
-#define PANIC_ZONE      4
-#define WARN_ZONE       3
-#define MONITOR_ZONE    2
-#define SAFE_ZONE       1
-
-#define GET_ZONE(trip) (trip + 2)
-#define GET_TRIP(zone) (zone - 2)
-
-enum trigger_type {
-       THROTTLE_ACTIVE = 1,
-       THROTTLE_PASSIVE,
-       SW_TRIP,
-       HW_TRIP,
-};
-
-/**
- * struct freq_clip_table
- * @freq_clip_max: maximum frequency allowed for this cooling state.
- * @temp_level: Temperature level at which the temperature clipping will
- *     happen.
- * @mask_val: cpumask of the allowed cpu's where the clipping will take place.
- *
- * This structure is required to be filled and passed to the
- * cpufreq_cooling_unregister function.
- */
-struct freq_clip_table {
-       unsigned int freq_clip_max;
-       unsigned int temp_level;
-       const struct cpumask *mask_val;
-};
-
-struct thermal_trip_point_conf {
-       int trip_val[MAX_TRIP_COUNT];
-       int trip_type[MAX_TRIP_COUNT];
-       int trip_count;
-       unsigned char trigger_falling;
-};
-
-struct thermal_cooling_conf {
-       struct freq_clip_table freq_data[MAX_TRIP_COUNT];
-       int freq_clip_count;
-};
-
-struct thermal_sensor_conf {
-       char name[SENSOR_NAME_LEN];
-       int (*read_temperature)(void *data);
-       int (*write_emul_temp)(void *drv_data, unsigned long temp);
-       struct thermal_trip_point_conf trip_data;
-       struct thermal_cooling_conf cooling_data;
-       void *driver_data;
-       void *pzone_data;
-       struct device *dev;
-};
-
-/*Functions used exynos based thermal sensor driver*/
-#ifdef CONFIG_EXYNOS_THERMAL_CORE
-void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf);
-int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf);
-void exynos_report_trigger(struct thermal_sensor_conf *sensor_conf);
-#else
-static inline void
-exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf) { return; }
-
-static inline int
-exynos_register_thermal(struct thermal_sensor_conf *sensor_conf) { return 0; }
-
-static inline void
-exynos_report_trigger(struct thermal_sensor_conf *sensor_conf) { return; }
-
-#endif /* CONFIG_EXYNOS_THERMAL_CORE */
-#endif /* _EXYNOS_THERMAL_COMMON_H */
index d2f1e62a42328095a35efb25ca461875e9f87c9f..933cd80a6bc5693e23da36048461bfd6250ba40b 100644 (file)
@@ -1,6 +1,10 @@
 /*
  * exynos_tmu.c - Samsung EXYNOS TMU (Thermal Management Unit)
  *
+ *  Copyright (C) 2014 Samsung Electronics
+ *  Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+ *  Lukasz Majewski <l.majewski@samsung.com>
+ *
  *  Copyright (C) 2011 Samsung Electronics
  *  Donggeun Kim <dg77.kim@samsung.com>
  *  Amit Daniel Kachhap <amit.kachhap@linaro.org>
@@ -31,8 +35,8 @@
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 
-#include "exynos_thermal_common.h"
 #include "exynos_tmu.h"
+#include "../thermal_core.h"
 
 /* Exynos generic registers */
 #define EXYNOS_TMU_REG_TRIMINFO                0x0
 #define EXYNOS5440_TMU_TH_RISE4_SHIFT          24
 #define EXYNOS5440_EFUSE_SWAP_OFFSET           8
 
+/* Exynos7 specific registers */
+#define EXYNOS7_THD_TEMP_RISE7_6               0x50
+#define EXYNOS7_THD_TEMP_FALL7_6               0x60
+#define EXYNOS7_TMU_REG_INTEN                  0x110
+#define EXYNOS7_TMU_REG_INTPEND                        0x118
+#define EXYNOS7_TMU_REG_EMUL_CON               0x160
+
+#define EXYNOS7_TMU_TEMP_MASK                  0x1ff
+#define EXYNOS7_PD_DET_EN_SHIFT                        23
+#define EXYNOS7_TMU_INTEN_RISE0_SHIFT          0
+#define EXYNOS7_TMU_INTEN_RISE1_SHIFT          1
+#define EXYNOS7_TMU_INTEN_RISE2_SHIFT          2
+#define EXYNOS7_TMU_INTEN_RISE3_SHIFT          3
+#define EXYNOS7_TMU_INTEN_RISE4_SHIFT          4
+#define EXYNOS7_TMU_INTEN_RISE5_SHIFT          5
+#define EXYNOS7_TMU_INTEN_RISE6_SHIFT          6
+#define EXYNOS7_TMU_INTEN_RISE7_SHIFT          7
+#define EXYNOS7_EMUL_DATA_SHIFT                        7
+#define EXYNOS7_EMUL_DATA_MASK                 0x1ff
+
+#define MCELSIUS       1000
 /**
  * struct exynos_tmu_data : A structure to hold the private data of the TMU
        driver
  * @lock: lock to implement synchronization.
  * @clk: pointer to the clock structure.
  * @clk_sec: pointer to the clock structure for accessing the base_second.
+ * @sclk: pointer to the clock structure for accessing the tmu special clk.
  * @temp_error1: fused value of the first point trim.
  * @temp_error2: fused value of the second point trim.
  * @regulator: pointer to the TMU regulator structure.
@@ -147,10 +173,11 @@ struct exynos_tmu_data {
        enum soc_type soc;
        struct work_struct irq_work;
        struct mutex lock;
-       struct clk *clk, *clk_sec;
-       u8 temp_error1, temp_error2;
+       struct clk *clk, *clk_sec, *sclk;
+       u16 temp_error1, temp_error2;
        struct regulator *regulator;
-       struct thermal_sensor_conf *reg_conf;
+       struct thermal_zone_device *tzd;
+
        int (*tmu_initialize)(struct platform_device *pdev);
        void (*tmu_control)(struct platform_device *pdev, bool on);
        int (*tmu_read)(struct exynos_tmu_data *data);
@@ -159,6 +186,33 @@ struct exynos_tmu_data {
        void (*tmu_clear_irqs)(struct exynos_tmu_data *data);
 };
 
+static void exynos_report_trigger(struct exynos_tmu_data *p)
+{
+       char data[10], *envp[] = { data, NULL };
+       struct thermal_zone_device *tz = p->tzd;
+       unsigned long temp;
+       unsigned int i;
+
+       if (!tz) {
+               pr_err("No thermal zone device defined\n");
+               return;
+       }
+
+       thermal_zone_device_update(tz);
+
+       mutex_lock(&tz->lock);
+       /* Find the level for which trip happened */
+       for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
+               tz->ops->get_trip_temp(tz, i, &temp);
+               if (tz->last_temperature < temp)
+                       break;
+       }
+
+       snprintf(data, sizeof(data), "%u", i);
+       kobject_uevent_env(&tz->device.kobj, KOBJ_CHANGE, envp);
+       mutex_unlock(&tz->lock);
+}
+
 /*
  * TMU treats temperature as a mapped temperature code.
  * The temperature is converted differently depending on the calibration type.
@@ -190,7 +244,7 @@ static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
  * Calculate a temperature value from a temperature code.
  * The unit of the temperature is degree Celsius.
  */
-static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
+static int code_to_temp(struct exynos_tmu_data *data, u16 temp_code)
 {
        struct exynos_tmu_platform_data *pdata = data->pdata;
        int temp;
@@ -234,14 +288,25 @@ static void sanitize_temp_error(struct exynos_tmu_data *data, u32 trim_info)
 
 static u32 get_th_reg(struct exynos_tmu_data *data, u32 threshold, bool falling)
 {
-       struct exynos_tmu_platform_data *pdata = data->pdata;
+       struct thermal_zone_device *tz = data->tzd;
+       const struct thermal_trip * const trips =
+               of_thermal_get_trip_points(tz);
+       unsigned long temp;
        int i;
 
-       for (i = 0; i < pdata->non_hw_trigger_levels; i++) {
-               u8 temp = pdata->trigger_levels[i];
+       if (!trips) {
+               pr_err("%s: Cannot get trip points from of-thermal.c!\n",
+                      __func__);
+               return 0;
+       }
+
+       for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
+               if (trips[i].type == THERMAL_TRIP_CRITICAL)
+                       continue;
 
+               temp = trips[i].temperature / MCELSIUS;
                if (falling)
-                       temp -= pdata->threshold_falling;
+                       temp -= (trips[i].hysteresis / MCELSIUS);
                else
                        threshold &= ~(0xff << 8 * i);
 
@@ -305,9 +370,19 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
 static int exynos4210_tmu_initialize(struct platform_device *pdev)
 {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
-       struct exynos_tmu_platform_data *pdata = data->pdata;
-       unsigned int status;
+       struct thermal_zone_device *tz = data->tzd;
+       const struct thermal_trip * const trips =
+               of_thermal_get_trip_points(tz);
        int ret = 0, threshold_code, i;
+       unsigned long reference, temp;
+       unsigned int status;
+
+       if (!trips) {
+               pr_err("%s: Cannot get trip points from of-thermal.c!\n",
+                      __func__);
+               ret = -ENODEV;
+               goto out;
+       }
 
        status = readb(data->base + EXYNOS_TMU_REG_STATUS);
        if (!status) {
@@ -318,12 +393,19 @@ static int exynos4210_tmu_initialize(struct platform_device *pdev)
        sanitize_temp_error(data, readl(data->base + EXYNOS_TMU_REG_TRIMINFO));
 
        /* Write temperature code for threshold */
-       threshold_code = temp_to_code(data, pdata->threshold);
+       reference = trips[0].temperature / MCELSIUS;
+       threshold_code = temp_to_code(data, reference);
+       if (threshold_code < 0) {
+               ret = threshold_code;
+               goto out;
+       }
        writeb(threshold_code, data->base + EXYNOS4210_TMU_REG_THRESHOLD_TEMP);
 
-       for (i = 0; i < pdata->non_hw_trigger_levels; i++)
-               writeb(pdata->trigger_levels[i], data->base +
+       for (i = 0; i < of_thermal_get_ntrips(tz); i++) {
+               temp = trips[i].temperature / MCELSIUS;
+               writeb(temp - reference, data->base +
                       EXYNOS4210_TMU_REG_TRIG_LEVEL0 + i * 4);
+       }
 
        data->tmu_clear_irqs(data);
 out:
@@ -333,9 +415,11 @@ out:
 static int exynos4412_tmu_initialize(struct platform_device *pdev)
 {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
-       struct exynos_tmu_platform_data *pdata = data->pdata;
+       const struct thermal_trip * const trips =
+               of_thermal_get_trip_points(data->tzd);
        unsigned int status, trim_info, con, ctrl, rising_threshold;
        int ret = 0, threshold_code, i;
+       unsigned long crit_temp = 0;
 
        status = readb(data->base + EXYNOS_TMU_REG_STATUS);
        if (!status) {
@@ -373,17 +457,29 @@ static int exynos4412_tmu_initialize(struct platform_device *pdev)
        data->tmu_clear_irqs(data);
 
        /* if last threshold limit is also present */
-       i = pdata->max_trigger_level - 1;
-       if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) {
-               threshold_code = temp_to_code(data, pdata->trigger_levels[i]);
-               /* 1-4 level to be assigned in th0 reg */
-               rising_threshold &= ~(0xff << 8 * i);
-               rising_threshold |= threshold_code << 8 * i;
-               writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
-               con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
-               con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
-               writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
+       for (i = 0; i < of_thermal_get_ntrips(data->tzd); i++) {
+               if (trips[i].type == THERMAL_TRIP_CRITICAL) {
+                       crit_temp = trips[i].temperature;
+                       break;
+               }
        }
+
+       if (i == of_thermal_get_ntrips(data->tzd)) {
+               pr_err("%s: No CRITICAL trip point defined at of-thermal.c!\n",
+                      __func__);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
+       /* 1-4 level to be assigned in th0 reg */
+       rising_threshold &= ~(0xff << 8 * i);
+       rising_threshold |= threshold_code << 8 * i;
+       writel(rising_threshold, data->base + EXYNOS_THD_TEMP_RISE);
+       con = readl(data->base + EXYNOS_TMU_REG_CONTROL);
+       con |= (1 << EXYNOS_TMU_THERM_TRIP_EN_SHIFT);
+       writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
+
 out:
        return ret;
 }
@@ -391,9 +487,9 @@ out:
 static int exynos5440_tmu_initialize(struct platform_device *pdev)
 {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
-       struct exynos_tmu_platform_data *pdata = data->pdata;
        unsigned int trim_info = 0, con, rising_threshold;
-       int ret = 0, threshold_code, i;
+       int ret = 0, threshold_code;
+       unsigned long crit_temp = 0;
 
        /*
         * For exynos5440 soc triminfo value is swapped between TMU0 and
@@ -422,9 +518,8 @@ static int exynos5440_tmu_initialize(struct platform_device *pdev)
        data->tmu_clear_irqs(data);
 
        /* if last threshold limit is also present */
-       i = pdata->max_trigger_level - 1;
-       if (pdata->trigger_levels[i] && pdata->trigger_type[i] == HW_TRIP) {
-               threshold_code = temp_to_code(data, pdata->trigger_levels[i]);
+       if (!data->tzd->ops->get_crit_temp(data->tzd, &crit_temp)) {
+               threshold_code = temp_to_code(data, crit_temp / MCELSIUS);
                /* 5th level to be assigned in th2 reg */
                rising_threshold =
                        threshold_code << EXYNOS5440_TMU_TH_RISE4_SHIFT;
@@ -439,10 +534,88 @@ static int exynos5440_tmu_initialize(struct platform_device *pdev)
        return ret;
 }
 
-static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
+static int exynos7_tmu_initialize(struct platform_device *pdev)
 {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+       struct thermal_zone_device *tz = data->tzd;
        struct exynos_tmu_platform_data *pdata = data->pdata;
+       unsigned int status, trim_info;
+       unsigned int rising_threshold = 0, falling_threshold = 0;
+       int ret = 0, threshold_code, i;
+       unsigned long temp, temp_hist;
+       unsigned int reg_off, bit_off;
+
+       status = readb(data->base + EXYNOS_TMU_REG_STATUS);
+       if (!status) {
+               ret = -EBUSY;
+               goto out;
+       }
+
+       trim_info = readl(data->base + EXYNOS_TMU_REG_TRIMINFO);
+
+       data->temp_error1 = trim_info & EXYNOS7_TMU_TEMP_MASK;
+       if (!data->temp_error1 ||
+           (pdata->min_efuse_value > data->temp_error1) ||
+           (data->temp_error1 > pdata->max_efuse_value))
+               data->temp_error1 = pdata->efuse_value & EXYNOS_TMU_TEMP_MASK;
+
+       /* Write temperature code for rising and falling threshold */
+       for (i = (of_thermal_get_ntrips(tz) - 1); i >= 0; i--) {
+               /*
+                * On exynos7 there are 4 rising and 4 falling threshold
+                * registers (0x50-0x5c and 0x60-0x6c respectively). Each
+                * register holds the value of two threshold levels (at bit
+                * offsets 0 and 16). Based on the fact that there are at most
+                * eight possible trigger levels, calculate the register and
+                * bit offsets where the threshold levels are to be written.
+                *
+                * e.g. EXYNOS7_THD_TEMP_RISE7_6 (0x50)
+                * [24:16] - Threshold level 7
+                * [8:0] - Threshold level 6
+                * e.g. EXYNOS7_THD_TEMP_RISE5_4 (0x54)
+                * [24:16] - Threshold level 5
+                * [8:0] - Threshold level 4
+                *
+                * and similarly for falling thresholds.
+                *
+                * Based on the above, calculate the register and bit offsets
+                * for rising/falling threshold levels and populate them.
+                */
+               reg_off = ((7 - i) / 2) * 4;
+               bit_off = ((8 - i) % 2);
+
+               tz->ops->get_trip_temp(tz, i, &temp);
+               temp /= MCELSIUS;
+
+               tz->ops->get_trip_hyst(tz, i, &temp_hist);
+               temp_hist = temp - (temp_hist / MCELSIUS);
+
+               /* Set 9-bit temperature code for rising threshold levels */
+               threshold_code = temp_to_code(data, temp);
+               rising_threshold = readl(data->base +
+                       EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
+               rising_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
+               rising_threshold |= threshold_code << (16 * bit_off);
+               writel(rising_threshold,
+                      data->base + EXYNOS7_THD_TEMP_RISE7_6 + reg_off);
+
+               /* Set 9-bit temperature code for falling threshold levels */
+               threshold_code = temp_to_code(data, temp_hist);
+               falling_threshold &= ~(EXYNOS7_TMU_TEMP_MASK << (16 * bit_off));
+               falling_threshold |= threshold_code << (16 * bit_off);
+               writel(falling_threshold,
+                      data->base + EXYNOS7_THD_TEMP_FALL7_6 + reg_off);
+       }
+
+       data->tmu_clear_irqs(data);
+out:
+       return ret;
+}
+
+static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
+{
+       struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+       struct thermal_zone_device *tz = data->tzd;
        unsigned int con, interrupt_en;
 
        con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
@@ -450,10 +623,15 @@ static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
        if (on) {
                con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
                interrupt_en =
-                       pdata->trigger_enable[3] << EXYNOS_TMU_INTEN_RISE3_SHIFT |
-                       pdata->trigger_enable[2] << EXYNOS_TMU_INTEN_RISE2_SHIFT |
-                       pdata->trigger_enable[1] << EXYNOS_TMU_INTEN_RISE1_SHIFT |
-                       pdata->trigger_enable[0] << EXYNOS_TMU_INTEN_RISE0_SHIFT;
+                       (of_thermal_is_trip_valid(tz, 3)
+                        << EXYNOS_TMU_INTEN_RISE3_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 2)
+                        << EXYNOS_TMU_INTEN_RISE2_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 1)
+                        << EXYNOS_TMU_INTEN_RISE1_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 0)
+                        << EXYNOS_TMU_INTEN_RISE0_SHIFT);
+
                if (data->soc != SOC_ARCH_EXYNOS4210)
                        interrupt_en |=
                                interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
@@ -468,7 +646,7 @@ static void exynos4210_tmu_control(struct platform_device *pdev, bool on)
 static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
 {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
-       struct exynos_tmu_platform_data *pdata = data->pdata;
+       struct thermal_zone_device *tz = data->tzd;
        unsigned int con, interrupt_en;
 
        con = get_con_reg(data, readl(data->base + EXYNOS5440_TMU_S0_7_CTRL));
@@ -476,11 +654,16 @@ static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
        if (on) {
                con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
                interrupt_en =
-                       pdata->trigger_enable[3] << EXYNOS5440_TMU_INTEN_RISE3_SHIFT |
-                       pdata->trigger_enable[2] << EXYNOS5440_TMU_INTEN_RISE2_SHIFT |
-                       pdata->trigger_enable[1] << EXYNOS5440_TMU_INTEN_RISE1_SHIFT |
-                       pdata->trigger_enable[0] << EXYNOS5440_TMU_INTEN_RISE0_SHIFT;
-               interrupt_en |= interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
+                       (of_thermal_is_trip_valid(tz, 3)
+                        << EXYNOS5440_TMU_INTEN_RISE3_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 2)
+                        << EXYNOS5440_TMU_INTEN_RISE2_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 1)
+                        << EXYNOS5440_TMU_INTEN_RISE1_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 0)
+                        << EXYNOS5440_TMU_INTEN_RISE0_SHIFT);
+               interrupt_en |=
+                       interrupt_en << EXYNOS5440_TMU_INTEN_FALL0_SHIFT;
        } else {
                con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
                interrupt_en = 0; /* Disable all interrupts */
@@ -489,19 +672,62 @@ static void exynos5440_tmu_control(struct platform_device *pdev, bool on)
        writel(con, data->base + EXYNOS5440_TMU_S0_7_CTRL);
 }
 
-static int exynos_tmu_read(struct exynos_tmu_data *data)
+static void exynos7_tmu_control(struct platform_device *pdev, bool on)
 {
-       int ret;
+       struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+       struct thermal_zone_device *tz = data->tzd;
+       unsigned int con, interrupt_en;
+
+       con = get_con_reg(data, readl(data->base + EXYNOS_TMU_REG_CONTROL));
+
+       if (on) {
+               con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
+               interrupt_en =
+                       (of_thermal_is_trip_valid(tz, 7)
+                       << EXYNOS7_TMU_INTEN_RISE7_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 6)
+                       << EXYNOS7_TMU_INTEN_RISE6_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 5)
+                       << EXYNOS7_TMU_INTEN_RISE5_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 4)
+                       << EXYNOS7_TMU_INTEN_RISE4_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 3)
+                       << EXYNOS7_TMU_INTEN_RISE3_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 2)
+                       << EXYNOS7_TMU_INTEN_RISE2_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 1)
+                       << EXYNOS7_TMU_INTEN_RISE1_SHIFT) |
+                       (of_thermal_is_trip_valid(tz, 0)
+                       << EXYNOS7_TMU_INTEN_RISE0_SHIFT);
+
+               interrupt_en |=
+                       interrupt_en << EXYNOS_TMU_INTEN_FALL0_SHIFT;
+       } else {
+               con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
+               interrupt_en = 0; /* Disable all interrupts */
+       }
+       con |= 1 << EXYNOS7_PD_DET_EN_SHIFT;
+
+       writel(interrupt_en, data->base + EXYNOS7_TMU_REG_INTEN);
+       writel(con, data->base + EXYNOS_TMU_REG_CONTROL);
+}
+
+static int exynos_get_temp(void *p, long *temp)
+{
+       struct exynos_tmu_data *data = p;
+
+       if (!data || !data->tmu_read)
+               return -EINVAL;
 
        mutex_lock(&data->lock);
        clk_enable(data->clk);
-       ret = data->tmu_read(data);
-       if (ret >= 0)
-               ret = code_to_temp(data, ret);
+
+       *temp = code_to_temp(data, data->tmu_read(data)) * MCELSIUS;
+
        clk_disable(data->clk);
        mutex_unlock(&data->lock);
 
-       return ret;
+       return 0;
 }
 
 #ifdef CONFIG_THERMAL_EMULATION
@@ -515,9 +741,19 @@ static u32 get_emul_con_reg(struct exynos_tmu_data *data, unsigned int val,
                        val &= ~(EXYNOS_EMUL_TIME_MASK << EXYNOS_EMUL_TIME_SHIFT);
                        val |= (EXYNOS_EMUL_TIME << EXYNOS_EMUL_TIME_SHIFT);
                }
-               val &= ~(EXYNOS_EMUL_DATA_MASK << EXYNOS_EMUL_DATA_SHIFT);
-               val |= (temp_to_code(data, temp) << EXYNOS_EMUL_DATA_SHIFT) |
-                       EXYNOS_EMUL_ENABLE;
+               if (data->soc == SOC_ARCH_EXYNOS7) {
+                       val &= ~(EXYNOS7_EMUL_DATA_MASK <<
+                               EXYNOS7_EMUL_DATA_SHIFT);
+                       val |= (temp_to_code(data, temp) <<
+                               EXYNOS7_EMUL_DATA_SHIFT) |
+                               EXYNOS_EMUL_ENABLE;
+               } else {
+                       val &= ~(EXYNOS_EMUL_DATA_MASK <<
+                               EXYNOS_EMUL_DATA_SHIFT);
+                       val |= (temp_to_code(data, temp) <<
+                               EXYNOS_EMUL_DATA_SHIFT) |
+                               EXYNOS_EMUL_ENABLE;
+               }
        } else {
                val &= ~EXYNOS_EMUL_ENABLE;
        }
@@ -533,6 +769,8 @@ static void exynos4412_tmu_set_emulation(struct exynos_tmu_data *data,
 
        if (data->soc == SOC_ARCH_EXYNOS5260)
                emul_con = EXYNOS5260_EMUL_CON;
+       else if (data->soc == SOC_ARCH_EXYNOS7)
+               emul_con = EXYNOS7_TMU_REG_EMUL_CON;
        else
                emul_con = EXYNOS_EMUL_CON;
 
@@ -576,7 +814,7 @@ out:
 #define exynos5440_tmu_set_emulation NULL
 static int exynos_tmu_set_emulation(void *drv_data,    unsigned long temp)
        { return -EINVAL; }
-#endif/*CONFIG_THERMAL_EMULATION*/
+#endif /* CONFIG_THERMAL_EMULATION */
 
 static int exynos4210_tmu_read(struct exynos_tmu_data *data)
 {
@@ -596,6 +834,12 @@ static int exynos5440_tmu_read(struct exynos_tmu_data *data)
        return readb(data->base + EXYNOS5440_TMU_S0_7_TEMP);
 }
 
+static int exynos7_tmu_read(struct exynos_tmu_data *data)
+{
+       return readw(data->base + EXYNOS_TMU_REG_CURRENT_TEMP) &
+               EXYNOS7_TMU_TEMP_MASK;
+}
+
 static void exynos_tmu_work(struct work_struct *work)
 {
        struct exynos_tmu_data *data = container_of(work,
@@ -613,7 +857,7 @@ static void exynos_tmu_work(struct work_struct *work)
        if (!IS_ERR(data->clk_sec))
                clk_disable(data->clk_sec);
 
-       exynos_report_trigger(data->reg_conf);
+       exynos_report_trigger(data);
        mutex_lock(&data->lock);
        clk_enable(data->clk);
 
@@ -634,6 +878,9 @@ static void exynos4210_tmu_clear_irqs(struct exynos_tmu_data *data)
        if (data->soc == SOC_ARCH_EXYNOS5260) {
                tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT;
                tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR;
+       } else if (data->soc == SOC_ARCH_EXYNOS7) {
+               tmu_intstat = EXYNOS7_TMU_REG_INTPEND;
+               tmu_intclear = EXYNOS7_TMU_REG_INTPEND;
        } else {
                tmu_intstat = EXYNOS_TMU_REG_INTSTAT;
                tmu_intclear = EXYNOS_TMU_REG_INTCLEAR;
@@ -673,55 +920,94 @@ static irqreturn_t exynos_tmu_irq(int irq, void *id)
 static const struct of_device_id exynos_tmu_match[] = {
        {
                .compatible = "samsung,exynos3250-tmu",
-               .data = &exynos3250_default_tmu_data,
        },
        {
                .compatible = "samsung,exynos4210-tmu",
-               .data = &exynos4210_default_tmu_data,
        },
        {
                .compatible = "samsung,exynos4412-tmu",
-               .data = &exynos4412_default_tmu_data,
        },
        {
                .compatible = "samsung,exynos5250-tmu",
-               .data = &exynos5250_default_tmu_data,
        },
        {
                .compatible = "samsung,exynos5260-tmu",
-               .data = &exynos5260_default_tmu_data,
        },
        {
                .compatible = "samsung,exynos5420-tmu",
-               .data = &exynos5420_default_tmu_data,
        },
        {
                .compatible = "samsung,exynos5420-tmu-ext-triminfo",
-               .data = &exynos5420_default_tmu_data,
        },
        {
                .compatible = "samsung,exynos5440-tmu",
-               .data = &exynos5440_default_tmu_data,
+       },
+       {
+               .compatible = "samsung,exynos7-tmu",
        },
        {},
 };
 MODULE_DEVICE_TABLE(of, exynos_tmu_match);
 
-static inline struct  exynos_tmu_platform_data *exynos_get_driver_data(
-                       struct platform_device *pdev, int id)
+static int exynos_of_get_soc_type(struct device_node *np)
+{
+       if (of_device_is_compatible(np, "samsung,exynos3250-tmu"))
+               return SOC_ARCH_EXYNOS3250;
+       else if (of_device_is_compatible(np, "samsung,exynos4210-tmu"))
+               return SOC_ARCH_EXYNOS4210;
+       else if (of_device_is_compatible(np, "samsung,exynos4412-tmu"))
+               return SOC_ARCH_EXYNOS4412;
+       else if (of_device_is_compatible(np, "samsung,exynos5250-tmu"))
+               return SOC_ARCH_EXYNOS5250;
+       else if (of_device_is_compatible(np, "samsung,exynos5260-tmu"))
+               return SOC_ARCH_EXYNOS5260;
+       else if (of_device_is_compatible(np, "samsung,exynos5420-tmu"))
+               return SOC_ARCH_EXYNOS5420;
+       else if (of_device_is_compatible(np,
+                                        "samsung,exynos5420-tmu-ext-triminfo"))
+               return SOC_ARCH_EXYNOS5420_TRIMINFO;
+       else if (of_device_is_compatible(np, "samsung,exynos5440-tmu"))
+               return SOC_ARCH_EXYNOS5440;
+       else if (of_device_is_compatible(np, "samsung,exynos7-tmu"))
+               return SOC_ARCH_EXYNOS7;
+
+       return -EINVAL;
+}
+
+static int exynos_of_sensor_conf(struct device_node *np,
+                                struct exynos_tmu_platform_data *pdata)
 {
-       struct  exynos_tmu_init_data *data_table;
-       struct exynos_tmu_platform_data *tmu_data;
-       const struct of_device_id *match;
+       u32 value;
+       int ret;
 
-       match = of_match_node(exynos_tmu_match, pdev->dev.of_node);
-       if (!match)
-               return NULL;
-       data_table = (struct exynos_tmu_init_data *) match->data;
-       if (!data_table || id >= data_table->tmu_count)
-               return NULL;
-       tmu_data = data_table->tmu_data;
-       return (struct exynos_tmu_platform_data *) (tmu_data + id);
+       of_node_get(np);
+
+       ret = of_property_read_u32(np, "samsung,tmu_gain", &value);
+       pdata->gain = (u8)value;
+       of_property_read_u32(np, "samsung,tmu_reference_voltage", &value);
+       pdata->reference_voltage = (u8)value;
+       of_property_read_u32(np, "samsung,tmu_noise_cancel_mode", &value);
+       pdata->noise_cancel_mode = (u8)value;
+
+       of_property_read_u32(np, "samsung,tmu_efuse_value",
+                            &pdata->efuse_value);
+       of_property_read_u32(np, "samsung,tmu_min_efuse_value",
+                            &pdata->min_efuse_value);
+       of_property_read_u32(np, "samsung,tmu_max_efuse_value",
+                            &pdata->max_efuse_value);
+
+       of_property_read_u32(np, "samsung,tmu_first_point_trim", &value);
+       pdata->first_point_trim = (u8)value;
+       of_property_read_u32(np, "samsung,tmu_second_point_trim", &value);
+       pdata->second_point_trim = (u8)value;
+       of_property_read_u32(np, "samsung,tmu_default_temp_offset", &value);
+       pdata->default_temp_offset = (u8)value;
+
+       of_property_read_u32(np, "samsung,tmu_cal_type", &pdata->cal_type);
+       of_property_read_u32(np, "samsung,tmu_cal_mode", &pdata->cal_mode);
+
+       of_node_put(np);
+       return 0;
 }
 
 static int exynos_map_dt_data(struct platform_device *pdev)
@@ -771,14 +1057,15 @@ static int exynos_map_dt_data(struct platform_device *pdev)
                return -EADDRNOTAVAIL;
        }
 
-       pdata = exynos_get_driver_data(pdev, data->id);
-       if (!pdata) {
-               dev_err(&pdev->dev, "No platform init data supplied.\n");
-               return -ENODEV;
-       }
+       pdata = devm_kzalloc(&pdev->dev,
+                            sizeof(struct exynos_tmu_platform_data),
+                            GFP_KERNEL);
+       if (!pdata)
+               return -ENOMEM;
 
+       exynos_of_sensor_conf(pdev->dev.of_node, pdata);
        data->pdata = pdata;
-       data->soc = pdata->type;
+       data->soc = exynos_of_get_soc_type(pdev->dev.of_node);
 
        switch (data->soc) {
        case SOC_ARCH_EXYNOS4210:
@@ -806,6 +1093,13 @@ static int exynos_map_dt_data(struct platform_device *pdev)
                data->tmu_set_emulation = exynos5440_tmu_set_emulation;
                data->tmu_clear_irqs = exynos5440_tmu_clear_irqs;
                break;
+       case SOC_ARCH_EXYNOS7:
+               data->tmu_initialize = exynos7_tmu_initialize;
+               data->tmu_control = exynos7_tmu_control;
+               data->tmu_read = exynos7_tmu_read;
+               data->tmu_set_emulation = exynos4412_tmu_set_emulation;
+               data->tmu_clear_irqs = exynos4210_tmu_clear_irqs;
+               break;
        default:
                dev_err(&pdev->dev, "Platform not supported\n");
                return -EINVAL;
@@ -834,12 +1128,16 @@ static int exynos_map_dt_data(struct platform_device *pdev)
        return 0;
 }
 
+static struct thermal_zone_of_device_ops exynos_sensor_ops = {
+       .get_temp = exynos_get_temp,
+       .set_emul_temp = exynos_tmu_set_emulation,
+};
+
 static int exynos_tmu_probe(struct platform_device *pdev)
 {
-       struct exynos_tmu_data *data;
        struct exynos_tmu_platform_data *pdata;
-       struct thermal_sensor_conf *sensor_conf;
-       int ret, i;
+       struct exynos_tmu_data *data;
+       int ret;
 
        data = devm_kzalloc(&pdev->dev, sizeof(struct exynos_tmu_data),
                                        GFP_KERNEL);
@@ -849,9 +1147,15 @@ static int exynos_tmu_probe(struct platform_device *pdev)
        platform_set_drvdata(pdev, data);
        mutex_init(&data->lock);
 
+       data->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, data,
+                                                   &exynos_sensor_ops);
+       if (IS_ERR(data->tzd)) {
+               pr_err("thermal: tz: %p ERROR\n", data->tzd);
+               return PTR_ERR(data->tzd);
+       }
        ret = exynos_map_dt_data(pdev);
        if (ret)
-               return ret;
+               goto err_sensor;
 
        pdata = data->pdata;
 
@@ -860,20 +1164,22 @@ static int exynos_tmu_probe(struct platform_device *pdev)
        data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
        if (IS_ERR(data->clk)) {
                dev_err(&pdev->dev, "Failed to get clock\n");
-               return  PTR_ERR(data->clk);
+               ret = PTR_ERR(data->clk);
+               goto err_sensor;
        }
 
        data->clk_sec = devm_clk_get(&pdev->dev, "tmu_triminfo_apbif");
        if (IS_ERR(data->clk_sec)) {
                if (data->soc == SOC_ARCH_EXYNOS5420_TRIMINFO) {
                        dev_err(&pdev->dev, "Failed to get triminfo clock\n");
-                       return PTR_ERR(data->clk_sec);
+                       ret = PTR_ERR(data->clk_sec);
+                       goto err_sensor;
                }
        } else {
                ret = clk_prepare(data->clk_sec);
                if (ret) {
                        dev_err(&pdev->dev, "Failed to get clock\n");
-                       return ret;
+                       goto err_sensor;
                }
        }
 
@@ -883,82 +1189,57 @@ static int exynos_tmu_probe(struct platform_device *pdev)
                goto err_clk_sec;
        }
 
-       ret = exynos_tmu_initialize(pdev);
-       if (ret) {
-               dev_err(&pdev->dev, "Failed to initialize TMU\n");
-               goto err_clk;
+       if (data->soc == SOC_ARCH_EXYNOS7) {
+               data->sclk = devm_clk_get(&pdev->dev, "tmu_sclk");
+               if (IS_ERR(data->sclk)) {
+                       dev_err(&pdev->dev, "Failed to get sclk\n");
+                       goto err_clk;
+               } else {
+                       ret = clk_prepare_enable(data->sclk);
+                       if (ret) {
+                               dev_err(&pdev->dev, "Failed to enable sclk\n");
+                               goto err_clk;
+                       }
+               }
        }
 
-       exynos_tmu_control(pdev, true);
-
-       /* Allocate a structure to register with the exynos core thermal */
-       sensor_conf = devm_kzalloc(&pdev->dev,
-                               sizeof(struct thermal_sensor_conf), GFP_KERNEL);
-       if (!sensor_conf) {
-               ret = -ENOMEM;
-               goto err_clk;
-       }
-       sprintf(sensor_conf->name, "therm_zone%d", data->id);
-       sensor_conf->read_temperature = (int (*)(void *))exynos_tmu_read;
-       sensor_conf->write_emul_temp =
-               (int (*)(void *, unsigned long))exynos_tmu_set_emulation;
-       sensor_conf->driver_data = data;
-       sensor_conf->trip_data.trip_count = pdata->trigger_enable[0] +
-                       pdata->trigger_enable[1] + pdata->trigger_enable[2]+
-                       pdata->trigger_enable[3];
-
-       for (i = 0; i < sensor_conf->trip_data.trip_count; i++) {
-               sensor_conf->trip_data.trip_val[i] =
-                       pdata->threshold + pdata->trigger_levels[i];
-               sensor_conf->trip_data.trip_type[i] =
-                                       pdata->trigger_type[i];
-       }
-
-       sensor_conf->trip_data.trigger_falling = pdata->threshold_falling;
-
-       sensor_conf->cooling_data.freq_clip_count = pdata->freq_tab_count;
-       for (i = 0; i < pdata->freq_tab_count; i++) {
-               sensor_conf->cooling_data.freq_data[i].freq_clip_max =
-                                       pdata->freq_tab[i].freq_clip_max;
-               sensor_conf->cooling_data.freq_data[i].temp_level =
-                                       pdata->freq_tab[i].temp_level;
-       }
-       sensor_conf->dev = &pdev->dev;
-       /* Register the sensor with thermal management interface */
-       ret = exynos_register_thermal(sensor_conf);
+       ret = exynos_tmu_initialize(pdev);
        if (ret) {
-               if (ret != -EPROBE_DEFER)
-                       dev_err(&pdev->dev,
-                               "Failed to register thermal interface: %d\n",
-                               ret);
-               goto err_clk;
+               dev_err(&pdev->dev, "Failed to initialize TMU\n");
+               goto err_sclk;
        }
-       data->reg_conf = sensor_conf;
 
        ret = devm_request_irq(&pdev->dev, data->irq, exynos_tmu_irq,
                IRQF_TRIGGER_RISING | IRQF_SHARED, dev_name(&pdev->dev), data);
        if (ret) {
                dev_err(&pdev->dev, "Failed to request irq: %d\n", data->irq);
-               goto err_clk;
+               goto err_sclk;
        }
 
+       exynos_tmu_control(pdev, true);
        return 0;
+err_sclk:
+       clk_disable_unprepare(data->sclk);
 err_clk:
        clk_unprepare(data->clk);
 err_clk_sec:
        if (!IS_ERR(data->clk_sec))
                clk_unprepare(data->clk_sec);
+err_sensor:
+       thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
+
        return ret;
 }
 
 static int exynos_tmu_remove(struct platform_device *pdev)
 {
        struct exynos_tmu_data *data = platform_get_drvdata(pdev);
+       struct thermal_zone_device *tzd = data->tzd;
 
-       exynos_unregister_thermal(data->reg_conf);
-
+       thermal_zone_of_sensor_unregister(&pdev->dev, tzd);
        exynos_tmu_control(pdev, false);
 
+       clk_disable_unprepare(data->sclk);
        clk_unprepare(data->clk);
        if (!IS_ERR(data->clk_sec))
                clk_unprepare(data->clk_sec);
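[Note] The exynos_tmu.c changes above move the driver from its private exynos_thermal_common glue to the generic of-thermal interface: the driver now registers one sensor per device with thermal_zone_of_sensor_register(), exposes only get_temp/set_emul_temp callbacks through exynos_sensor_ops, and leaves trip points and cooling maps to the thermal-zones device-tree description. As a rough, hypothetical illustration of the same pattern (the foo_* names are invented here, not part of the patch), a minimal sensor driver would look roughly like this:

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/thermal.h>

struct foo_tmu {
	void __iomem *base;
	struct thermal_zone_device *tzd;
};

/* of-thermal expects temperatures in millidegrees Celsius. */
static int foo_get_temp(void *p, long *temp)
{
	struct foo_tmu *tmu = p;

	*temp = (long)readl(tmu->base) * 1000;
	return 0;
}

static struct thermal_zone_of_device_ops foo_sensor_ops = {
	.get_temp = foo_get_temp,
};

static int foo_register_sensor(struct platform_device *pdev, struct foo_tmu *tmu)
{
	/* Sensor id 0; trips and cooling maps come from the thermal-zones DT node. */
	tmu->tzd = thermal_zone_of_sensor_register(&pdev->dev, 0, tmu,
						   &foo_sensor_ops);
	return PTR_ERR_OR_ZERO(tmu->tzd);
}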
index da3009bff6c439d013f21710b05792fc86b146b0..4d71ec6c9aa0ba10b86c3975d92d5ebcea00d543 100644 (file)
 #ifndef _EXYNOS_TMU_H
 #define _EXYNOS_TMU_H
 #include <linux/cpu_cooling.h>
-
-#include "exynos_thermal_common.h"
-
-enum calibration_type {
-       TYPE_ONE_POINT_TRIMMING,
-       TYPE_ONE_POINT_TRIMMING_25,
-       TYPE_ONE_POINT_TRIMMING_85,
-       TYPE_TWO_POINT_TRIMMING,
-       TYPE_NONE,
-};
+#include <dt-bindings/thermal/thermal_exynos.h>
 
 enum soc_type {
        SOC_ARCH_EXYNOS3250 = 1,
@@ -43,38 +34,11 @@ enum soc_type {
        SOC_ARCH_EXYNOS5420,
        SOC_ARCH_EXYNOS5420_TRIMINFO,
        SOC_ARCH_EXYNOS5440,
+       SOC_ARCH_EXYNOS7,
 };
 
 /**
  * struct exynos_tmu_platform_data
- * @threshold: basic temperature for generating interrupt
- *            25 <= threshold <= 125 [unit: degree Celsius]
- * @threshold_falling: differntial value for setting threshold
- *                    of temperature falling interrupt.
- * @trigger_levels: array for each interrupt levels
- *     [unit: degree Celsius]
- *     0: temperature for trigger_level0 interrupt
- *        condition for trigger_level0 interrupt:
- *             current temperature > threshold + trigger_levels[0]
- *     1: temperature for trigger_level1 interrupt
- *        condition for trigger_level1 interrupt:
- *             current temperature > threshold + trigger_levels[1]
- *     2: temperature for trigger_level2 interrupt
- *        condition for trigger_level2 interrupt:
- *             current temperature > threshold + trigger_levels[2]
- *     3: temperature for trigger_level3 interrupt
- *        condition for trigger_level3 interrupt:
- *             current temperature > threshold + trigger_levels[3]
- * @trigger_type: defines the type of trigger. Possible values are,
- *     THROTTLE_ACTIVE trigger type
- *     THROTTLE_PASSIVE trigger type
- *     SW_TRIP trigger type
- *     HW_TRIP
- * @trigger_enable[]: array to denote which trigger levels are enabled.
- *     1 = enable trigger_level[] interrupt,
- *     0 = disable trigger_level[] interrupt
- * @max_trigger_level: max trigger level supported by the TMU
- * @non_hw_trigger_levels: number of defined non-hardware trigger levels
  * @gain: gain of amplifier in the positive-TC generator block
  *     0 < gain <= 15
  * @reference_voltage: reference voltage of amplifier
@@ -86,24 +50,12 @@ enum soc_type {
  * @efuse_value: platform defined fuse value
  * @min_efuse_value: minimum valid trimming data
  * @max_efuse_value: maximum valid trimming data
- * @first_point_trim: temp value of the first point trimming
- * @second_point_trim: temp value of the second point trimming
  * @default_temp_offset: default temperature offset in case of no trimming
  * @cal_type: calibration type for temperature
- * @freq_clip_table: Table representing frequency reduction percentage.
- * @freq_tab_count: Count of the above table as frequency reduction may
- *     applicable to only some of the trigger levels.
  *
  * This structure is required for configuration of exynos_tmu driver.
  */
 struct exynos_tmu_platform_data {
-       u8 threshold;
-       u8 threshold_falling;
-       u8 trigger_levels[MAX_TRIP_COUNT];
-       enum trigger_type trigger_type[MAX_TRIP_COUNT];
-       bool trigger_enable[MAX_TRIP_COUNT];
-       u8 max_trigger_level;
-       u8 non_hw_trigger_levels;
        u8 gain;
        u8 reference_voltage;
        u8 noise_cancel_mode;
@@ -115,30 +67,9 @@ struct exynos_tmu_platform_data {
        u8 second_point_trim;
        u8 default_temp_offset;
 
-       enum calibration_type cal_type;
        enum soc_type type;
-       struct freq_clip_table freq_tab[4];
-       unsigned int freq_tab_count;
-};
-
-/**
- * struct exynos_tmu_init_data
- * @tmu_count: number of TMU instances.
- * @tmu_data: platform data of all TMU instances.
- * This structure is required to store data for multi-instance exynos tmu
- * driver.
- */
-struct exynos_tmu_init_data {
-       int tmu_count;
-       struct exynos_tmu_platform_data tmu_data[];
+       u32 cal_type;
+       u32 cal_mode;
 };
 
-extern struct exynos_tmu_init_data const exynos3250_default_tmu_data;
-extern struct exynos_tmu_init_data const exynos4210_default_tmu_data;
-extern struct exynos_tmu_init_data const exynos4412_default_tmu_data;
-extern struct exynos_tmu_init_data const exynos5250_default_tmu_data;
-extern struct exynos_tmu_init_data const exynos5260_default_tmu_data;
-extern struct exynos_tmu_init_data const exynos5420_default_tmu_data;
-extern struct exynos_tmu_init_data const exynos5440_default_tmu_data;
-
 #endif /* _EXYNOS_TMU_H */
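[Note] With the driver-local calibration_type enum removed, exynos_tmu.h now takes its shared constants from <dt-bindings/thermal/thermal_exynos.h>, and cal_type/cal_mode become plain u32 values filled in from the samsung,tmu_cal_type / samsung,tmu_cal_mode properties by exynos_of_sensor_conf(). The binding header itself is not part of this diff; assuming it simply mirrors the deleted enum (values 0..4 in the same order), it would look something like:

/* Sketch only -- header name, guard, and values are assumed, not shown in this diff. */
#ifndef _DT_BINDINGS_THERMAL_EXYNOS_H
#define _DT_BINDINGS_THERMAL_EXYNOS_H

#define TYPE_ONE_POINT_TRIMMING		0
#define TYPE_ONE_POINT_TRIMMING_25	1
#define TYPE_ONE_POINT_TRIMMING_85	2
#define TYPE_TWO_POINT_TRIMMING		3
#define TYPE_NONE			4

#endif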
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
deleted file mode 100644 (file)
index b239100..0000000
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * exynos_tmu_data.c - Samsung EXYNOS tmu data file
- *
- *  Copyright (C) 2013 Samsung Electronics
- *  Amit Daniel Kachhap <amit.daniel@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- */
-
-#include "exynos_thermal_common.h"
-#include "exynos_tmu.h"
-
-struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
-       .tmu_data = {
-               {
-               .threshold = 80,
-               .trigger_levels[0] = 5,
-               .trigger_levels[1] = 20,
-               .trigger_levels[2] = 30,
-               .trigger_enable[0] = true,
-               .trigger_enable[1] = true,
-               .trigger_enable[2] = true,
-               .trigger_enable[3] = false,
-               .trigger_type[0] = THROTTLE_ACTIVE,
-               .trigger_type[1] = THROTTLE_ACTIVE,
-               .trigger_type[2] = SW_TRIP,
-               .max_trigger_level = 4,
-               .non_hw_trigger_levels = 3,
-               .gain = 15,
-               .reference_voltage = 7,
-               .cal_type = TYPE_ONE_POINT_TRIMMING,
-               .min_efuse_value = 40,
-               .max_efuse_value = 100,
-               .first_point_trim = 25,
-               .second_point_trim = 85,
-               .default_temp_offset = 50,
-               .freq_tab[0] = {
-                       .freq_clip_max = 800 * 1000,
-                       .temp_level = 85,
-                       },
-               .freq_tab[1] = {
-                       .freq_clip_max = 200 * 1000,
-                       .temp_level = 100,
-               },
-               .freq_tab_count = 2,
-               .type = SOC_ARCH_EXYNOS4210,
-               },
-       },
-       .tmu_count = 1,
-};
-
-#define EXYNOS3250_TMU_DATA \
-       .threshold_falling = 10, \
-       .trigger_levels[0] = 70, \
-       .trigger_levels[1] = 95, \
-       .trigger_levels[2] = 110, \
-       .trigger_levels[3] = 120, \
-       .trigger_enable[0] = true, \
-       .trigger_enable[1] = true, \
-       .trigger_enable[2] = true, \
-       .trigger_enable[3] = false, \
-       .trigger_type[0] = THROTTLE_ACTIVE, \
-       .trigger_type[1] = THROTTLE_ACTIVE, \
-       .trigger_type[2] = SW_TRIP, \
-       .trigger_type[3] = HW_TRIP, \
-       .max_trigger_level = 4, \
-       .non_hw_trigger_levels = 3, \
-       .gain = 8, \
-       .reference_voltage = 16, \
-       .noise_cancel_mode = 4, \
-       .cal_type = TYPE_TWO_POINT_TRIMMING, \
-       .efuse_value = 55, \
-       .min_efuse_value = 40, \
-       .max_efuse_value = 100, \
-       .first_point_trim = 25, \
-       .second_point_trim = 85, \
-       .default_temp_offset = 50, \
-       .freq_tab[0] = { \
-               .freq_clip_max = 800 * 1000, \
-               .temp_level = 70, \
-       }, \
-       .freq_tab[1] = { \
-               .freq_clip_max = 400 * 1000, \
-               .temp_level = 95, \
-       }, \
-       .freq_tab_count = 2
-
-struct exynos_tmu_init_data const exynos3250_default_tmu_data = {
-       .tmu_data = {
-               {
-                       EXYNOS3250_TMU_DATA,
-                       .type = SOC_ARCH_EXYNOS3250,
-               },
-       },
-       .tmu_count = 1,
-};
-
-#define EXYNOS4412_TMU_DATA \
-       .threshold_falling = 10, \
-       .trigger_levels[0] = 70, \
-       .trigger_levels[1] = 95, \
-       .trigger_levels[2] = 110, \
-       .trigger_levels[3] = 120, \
-       .trigger_enable[0] = true, \
-       .trigger_enable[1] = true, \
-       .trigger_enable[2] = true, \
-       .trigger_enable[3] = false, \
-       .trigger_type[0] = THROTTLE_ACTIVE, \
-       .trigger_type[1] = THROTTLE_ACTIVE, \
-       .trigger_type[2] = SW_TRIP, \
-       .trigger_type[3] = HW_TRIP, \
-       .max_trigger_level = 4, \
-       .non_hw_trigger_levels = 3, \
-       .gain = 8, \
-       .reference_voltage = 16, \
-       .noise_cancel_mode = 4, \
-       .cal_type = TYPE_ONE_POINT_TRIMMING, \
-       .efuse_value = 55, \
-       .min_efuse_value = 40, \
-       .max_efuse_value = 100, \
-       .first_point_trim = 25, \
-       .second_point_trim = 85, \
-       .default_temp_offset = 50, \
-       .freq_tab[0] = { \
-               .freq_clip_max = 1400 * 1000, \
-               .temp_level = 70, \
-       }, \
-       .freq_tab[1] = { \
-               .freq_clip_max = 400 * 1000, \
-               .temp_level = 95, \
-       }, \
-       .freq_tab_count = 2
-
-struct exynos_tmu_init_data const exynos4412_default_tmu_data = {
-       .tmu_data = {
-               {
-                       EXYNOS4412_TMU_DATA,
-                       .type = SOC_ARCH_EXYNOS4412,
-               },
-       },
-       .tmu_count = 1,
-};
-
-struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
-       .tmu_data = {
-               {
-                       EXYNOS4412_TMU_DATA,
-                       .type = SOC_ARCH_EXYNOS5250,
-               },
-       },
-       .tmu_count = 1,
-};
-
-#define __EXYNOS5260_TMU_DATA  \
-       .threshold_falling = 10, \
-       .trigger_levels[0] = 85, \
-       .trigger_levels[1] = 103, \
-       .trigger_levels[2] = 110, \
-       .trigger_levels[3] = 120, \
-       .trigger_enable[0] = true, \
-       .trigger_enable[1] = true, \
-       .trigger_enable[2] = true, \
-       .trigger_enable[3] = false, \
-       .trigger_type[0] = THROTTLE_ACTIVE, \
-       .trigger_type[1] = THROTTLE_ACTIVE, \
-       .trigger_type[2] = SW_TRIP, \
-       .trigger_type[3] = HW_TRIP, \
-       .max_trigger_level = 4, \
-       .non_hw_trigger_levels = 3, \
-       .gain = 8, \
-       .reference_voltage = 16, \
-       .noise_cancel_mode = 4, \
-       .cal_type = TYPE_ONE_POINT_TRIMMING, \
-       .efuse_value = 55, \
-       .min_efuse_value = 40, \
-       .max_efuse_value = 100, \
-       .first_point_trim = 25, \
-       .second_point_trim = 85, \
-       .default_temp_offset = 50, \
-       .freq_tab[0] = { \
-               .freq_clip_max = 800 * 1000, \
-               .temp_level = 85, \
-       }, \
-       .freq_tab[1] = { \
-               .freq_clip_max = 200 * 1000, \
-               .temp_level = 103, \
-       }, \
-       .freq_tab_count = 2, \
-
-#define EXYNOS5260_TMU_DATA \
-       __EXYNOS5260_TMU_DATA \
-       .type = SOC_ARCH_EXYNOS5260
-
-struct exynos_tmu_init_data const exynos5260_default_tmu_data = {
-       .tmu_data = {
-               { EXYNOS5260_TMU_DATA },
-               { EXYNOS5260_TMU_DATA },
-               { EXYNOS5260_TMU_DATA },
-               { EXYNOS5260_TMU_DATA },
-               { EXYNOS5260_TMU_DATA },
-       },
-       .tmu_count = 5,
-};
-
-#define EXYNOS5420_TMU_DATA \
-       __EXYNOS5260_TMU_DATA \
-       .type = SOC_ARCH_EXYNOS5420
-
-#define EXYNOS5420_TMU_DATA_SHARED \
-       __EXYNOS5260_TMU_DATA \
-       .type = SOC_ARCH_EXYNOS5420_TRIMINFO
-
-struct exynos_tmu_init_data const exynos5420_default_tmu_data = {
-       .tmu_data = {
-               { EXYNOS5420_TMU_DATA },
-               { EXYNOS5420_TMU_DATA },
-               { EXYNOS5420_TMU_DATA_SHARED },
-               { EXYNOS5420_TMU_DATA_SHARED },
-               { EXYNOS5420_TMU_DATA_SHARED },
-       },
-       .tmu_count = 5,
-};
-
-#define EXYNOS5440_TMU_DATA \
-       .trigger_levels[0] = 100, \
-       .trigger_levels[4] = 105, \
-       .trigger_enable[0] = 1, \
-       .trigger_type[0] = SW_TRIP, \
-       .trigger_type[4] = HW_TRIP, \
-       .max_trigger_level = 5, \
-       .non_hw_trigger_levels = 1, \
-       .gain = 5, \
-       .reference_voltage = 16, \
-       .noise_cancel_mode = 4, \
-       .cal_type = TYPE_ONE_POINT_TRIMMING, \
-       .efuse_value = 0x5b2d, \
-       .min_efuse_value = 16, \
-       .max_efuse_value = 76, \
-       .first_point_trim = 25, \
-       .second_point_trim = 70, \
-       .default_temp_offset = 25, \
-       .type = SOC_ARCH_EXYNOS5440
-
-struct exynos_tmu_init_data const exynos5440_default_tmu_data = {
-       .tmu_data = {
-               { EXYNOS5440_TMU_DATA } ,
-               { EXYNOS5440_TMU_DATA } ,
-               { EXYNOS5440_TMU_DATA } ,
-       },
-       .tmu_count = 3,
-};
index fdd1f523a1eda4e94110e12dac531c218837dc08..5a0f12d08e8b81cc26b71b7e6c065091a098920f 100644 (file)
@@ -45,7 +45,7 @@
  *    c. if the trend is THERMAL_TREND_RAISE_FULL, do nothing
  *    d. if the trend is THERMAL_TREND_DROP_FULL, use lower limit,
  *       if the cooling state already equals lower limit,
- *       deactive the thermal instance
+ *       deactivate the thermal instance
  */
 static unsigned long get_target_state(struct thermal_instance *instance,
                                enum thermal_trend trend, bool throttle)
@@ -169,7 +169,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
 }
 
 /**
- * step_wise_throttle - throttles devices asscciated with the given zone
+ * step_wise_throttle - throttles devices associated with the given zone
  * @tz - thermal_zone_device
  * @trip - the trip point
  * @trip_type - type of the trip point
index 5d916c7a216b86829d4f10190787583258f3852e..d2501f01cd03483b3f83a384027275abb5b5c089 100644 (file)
@@ -489,7 +489,7 @@ config SERIAL_MFD_HSU
        select SERIAL_CORE
 
 config SERIAL_MFD_HSU_CONSOLE
-       boolean "Medfile HSU serial console support"
+       bool "Medfile HSU serial console support"
        depends on SERIAL_MFD_HSU=y
        select SERIAL_CORE_CONSOLE
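[Note] This hunk, like the USB gadget, UDC, and PHY Kconfig hunks further down, is a mechanical cleanup rather than a functional change: kconfig has long treated "boolean" as a synonym for "bool", and these hunks switch the remaining users to the canonical spelling, e.g. bool "Debugging messages (DEVELOPMENT)" instead of boolean "...". Prompt strings, dependencies, and help text are left untouched.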
 
index 96539038c03ace1de0181b1f1d772aa10a7e67b2..b454d05be5838e08ecdda4fd60514ad0f444edc9 100644 (file)
@@ -45,7 +45,7 @@ menuconfig USB_GADGET
 if USB_GADGET
 
 config USB_GADGET_DEBUG
-       boolean "Debugging messages (DEVELOPMENT)"
+       bool "Debugging messages (DEVELOPMENT)"
        depends on DEBUG_KERNEL
        help
           Many controller and gadget drivers will print some debugging
@@ -73,7 +73,7 @@ config USB_GADGET_VERBOSE
           production build.
 
 config USB_GADGET_DEBUG_FILES
-       boolean "Debugging information files (DEVELOPMENT)"
+       bool "Debugging information files (DEVELOPMENT)"
        depends on PROC_FS
        help
           Some of the drivers in the "gadget" framework can expose
@@ -84,7 +84,7 @@ config USB_GADGET_DEBUG_FILES
           here.  If in doubt, or to conserve kernel memory, say "N".
 
 config USB_GADGET_DEBUG_FS
-       boolean "Debugging information files in debugfs (DEVELOPMENT)"
+       bool "Debugging information files in debugfs (DEVELOPMENT)"
        depends on DEBUG_FS
        help
           Some of the drivers in the "gadget" framework can expose
@@ -230,7 +230,7 @@ config USB_CONFIGFS
          For more information see Documentation/usb/gadget_configfs.txt.
 
 config USB_CONFIGFS_SERIAL
-       boolean "Generic serial bulk in/out"
+       bool "Generic serial bulk in/out"
        depends on USB_CONFIGFS
        depends on TTY
        select USB_U_SERIAL
@@ -239,7 +239,7 @@ config USB_CONFIGFS_SERIAL
          The function talks to the Linux-USB generic serial driver.
 
 config USB_CONFIGFS_ACM
-       boolean "Abstract Control Model (CDC ACM)"
+       bool "Abstract Control Model (CDC ACM)"
        depends on USB_CONFIGFS
        depends on TTY
        select USB_U_SERIAL
@@ -249,7 +249,7 @@ config USB_CONFIGFS_ACM
          MS-Windows hosts or with the Linux-USB "cdc-acm" driver.
 
 config USB_CONFIGFS_OBEX
-       boolean "Object Exchange Model (CDC OBEX)"
+       bool "Object Exchange Model (CDC OBEX)"
        depends on USB_CONFIGFS
        depends on TTY
        select USB_U_SERIAL
@@ -259,7 +259,7 @@ config USB_CONFIGFS_OBEX
          since the kernel itself doesn't implement the OBEX protocol.
 
 config USB_CONFIGFS_NCM
-       boolean "Network Control Model (CDC NCM)"
+       bool "Network Control Model (CDC NCM)"
        depends on USB_CONFIGFS
        depends on NET
        select USB_U_ETHER
@@ -270,7 +270,7 @@ config USB_CONFIGFS_NCM
          different alignment possibilities.
 
 config USB_CONFIGFS_ECM
-       boolean "Ethernet Control Model (CDC ECM)"
+       bool "Ethernet Control Model (CDC ECM)"
        depends on USB_CONFIGFS
        depends on NET
        select USB_U_ETHER
@@ -282,7 +282,7 @@ config USB_CONFIGFS_ECM
          supported by firmware for smart network devices.
 
 config USB_CONFIGFS_ECM_SUBSET
-       boolean "Ethernet Control Model (CDC ECM) subset"
+       bool "Ethernet Control Model (CDC ECM) subset"
        depends on USB_CONFIGFS
        depends on NET
        select USB_U_ETHER
@@ -323,7 +323,7 @@ config USB_CONFIGFS_EEM
          the host is the same (a usbX device), so the differences are minimal.
 
 config USB_CONFIGFS_PHONET
-       boolean "Phonet protocol"
+       bool "Phonet protocol"
        depends on USB_CONFIGFS
        depends on NET
        depends on PHONET
@@ -333,7 +333,7 @@ config USB_CONFIGFS_PHONET
          The Phonet protocol implementation for USB device.
 
 config USB_CONFIGFS_MASS_STORAGE
-       boolean "Mass storage"
+       bool "Mass storage"
        depends on USB_CONFIGFS
        depends on BLOCK
        select USB_F_MASS_STORAGE
@@ -344,7 +344,7 @@ config USB_CONFIGFS_MASS_STORAGE
          specified as a module parameter or sysfs option.
 
 config USB_CONFIGFS_F_LB_SS
-       boolean "Loopback and sourcesink function (for testing)"
+       bool "Loopback and sourcesink function (for testing)"
        depends on USB_CONFIGFS
        select USB_F_SS_LB
        help
@@ -357,7 +357,7 @@ config USB_CONFIGFS_F_LB_SS
          and its driver through a basic set of functional tests.
 
 config USB_CONFIGFS_F_FS
-       boolean "Function filesystem (FunctionFS)"
+       bool "Function filesystem (FunctionFS)"
        depends on USB_CONFIGFS
        select USB_F_FS
        help
@@ -369,7 +369,7 @@ config USB_CONFIGFS_F_FS
          mass storage) and other are implemented in user space.
 
 config USB_CONFIGFS_F_UAC1
-       boolean "Audio Class 1.0"
+       bool "Audio Class 1.0"
        depends on USB_CONFIGFS
        depends on SND
        select USB_LIBCOMPOSITE
@@ -382,7 +382,7 @@ config USB_CONFIGFS_F_UAC1
          on the device.
 
 config USB_CONFIGFS_F_UAC2
-       boolean "Audio Class 2.0"
+       bool "Audio Class 2.0"
        depends on USB_CONFIGFS
        depends on SND
        select USB_LIBCOMPOSITE
@@ -400,7 +400,7 @@ config USB_CONFIGFS_F_UAC2
          wants as audio data to the USB Host.
 
 config USB_CONFIGFS_F_MIDI
-       boolean "MIDI function"
+       bool "MIDI function"
        depends on USB_CONFIGFS
        depends on SND
        select USB_LIBCOMPOSITE
@@ -414,7 +414,7 @@ config USB_CONFIGFS_F_MIDI
          ALSA's aconnect utility etc.
 
 config USB_CONFIGFS_F_HID
-       boolean "HID function"
+       bool "HID function"
        depends on USB_CONFIGFS
        select USB_F_HID
        help
index fd48ef3af4eb76f6ce34314cfced2ca305e395c5..113c87e22117d432708001bec07957678267ea46 100644 (file)
@@ -40,7 +40,7 @@ config USB_ZERO
          dynamically linked module called "g_zero".
 
 config USB_ZERO_HNPTEST
-       boolean "HNP Test Device"
+       bool "HNP Test Device"
        depends on USB_ZERO && USB_OTG
        help
          You can configure this device to enumerate using the device
index 366e551aeff0bd11084ac89e4b131b6f6953369d..9a3a6b00391aa41042836d78d1b88eed27b9ee73 100644 (file)
@@ -199,7 +199,7 @@ config USB_S3C2410
          S3C2440 processors.
 
 config USB_S3C2410_DEBUG
-       boolean "S3C2410 udc debug messages"
+       bool "S3C2410 udc debug messages"
        depends on USB_S3C2410
 
 config USB_S3C_HSUDC
@@ -288,7 +288,7 @@ config USB_NET2272
          gadget drivers to also be dynamically linked.
 
 config USB_NET2272_DMA
-       boolean "Support external DMA controller"
+       bool "Support external DMA controller"
        depends on USB_NET2272 && HAS_DMA
        help
          The NET2272 part can optionally support an external DMA
index c6d0c8e745b976232301e55493b45756d5635fb1..52d3d58252e1fd4a3702705a8d12f57d81942ca2 100644 (file)
@@ -119,7 +119,7 @@ config TAHVO_USB
 
 config TAHVO_USB_HOST_BY_DEFAULT
        depends on TAHVO_USB
-       boolean "Device in USB host mode by default"
+       bool "Device in USB host mode by default"
        help
          Say Y here, if you want the device to enter USB host mode
          by default on bootup.
index 7cc0122a18cecbb7ef45cf8e438112ec2fb4ff00..f8a186381ae8726887657c6c868a8b2ceeef2e54 100644 (file)
@@ -239,9 +239,12 @@ static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
 
                        return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
                }
-       } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX)
+       } else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
                if (pci_is_pcie(vdev->pdev))
                        return 1;
+       } else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
+               return 1;
+       }
 
        return 0;
 }
@@ -464,6 +467,7 @@ static long vfio_pci_ioctl(void *device_data,
 
                switch (info.index) {
                case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
+               case VFIO_PCI_REQ_IRQ_INDEX:
                        break;
                case VFIO_PCI_ERR_IRQ_INDEX:
                        if (pci_is_pcie(vdev->pdev))
@@ -828,6 +832,20 @@ static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
                               req_len, vma->vm_page_prot);
 }
 
+static void vfio_pci_request(void *device_data, unsigned int count)
+{
+       struct vfio_pci_device *vdev = device_data;
+
+       mutex_lock(&vdev->igate);
+
+       if (vdev->req_trigger) {
+               dev_dbg(&vdev->pdev->dev, "Requesting device from user\n");
+               eventfd_signal(vdev->req_trigger, 1);
+       }
+
+       mutex_unlock(&vdev->igate);
+}
+
 static const struct vfio_device_ops vfio_pci_ops = {
        .name           = "vfio-pci",
        .open           = vfio_pci_open,
@@ -836,6 +854,7 @@ static const struct vfio_device_ops vfio_pci_ops = {
        .read           = vfio_pci_read,
        .write          = vfio_pci_write,
        .mmap           = vfio_pci_mmap,
+       .request        = vfio_pci_request,
 };
 
 static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
index e8d695b3f54e0fdbf74e567dc5ae109cb8ea5da2..f88bfdf5b6a036a6bf1aae3b8abe3ec6d944ffe0 100644 (file)
@@ -763,46 +763,70 @@ static int vfio_pci_set_msi_trigger(struct vfio_pci_device *vdev,
        return 0;
 }
 
-static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
-                                   unsigned index, unsigned start,
-                                   unsigned count, uint32_t flags, void *data)
+static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx,
+                                          uint32_t flags, void *data)
 {
        int32_t fd = *(int32_t *)data;
 
-       if ((index != VFIO_PCI_ERR_IRQ_INDEX) ||
-           !(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
+       if (!(flags & VFIO_IRQ_SET_DATA_TYPE_MASK))
                return -EINVAL;
 
        /* DATA_NONE/DATA_BOOL enables loopback testing */
        if (flags & VFIO_IRQ_SET_DATA_NONE) {
-               if (vdev->err_trigger)
-                       eventfd_signal(vdev->err_trigger, 1);
+               if (*ctx)
+                       eventfd_signal(*ctx, 1);
                return 0;
        } else if (flags & VFIO_IRQ_SET_DATA_BOOL) {
                uint8_t trigger = *(uint8_t *)data;
-               if (trigger && vdev->err_trigger)
-                       eventfd_signal(vdev->err_trigger, 1);
+               if (trigger && *ctx)
+                       eventfd_signal(*ctx, 1);
                return 0;
        }
 
        /* Handle SET_DATA_EVENTFD */
        if (fd == -1) {
-               if (vdev->err_trigger)
-                       eventfd_ctx_put(vdev->err_trigger);
-               vdev->err_trigger = NULL;
+               if (*ctx)
+                       eventfd_ctx_put(*ctx);
+               *ctx = NULL;
                return 0;
        } else if (fd >= 0) {
                struct eventfd_ctx *efdctx;
                efdctx = eventfd_ctx_fdget(fd);
                if (IS_ERR(efdctx))
                        return PTR_ERR(efdctx);
-               if (vdev->err_trigger)
-                       eventfd_ctx_put(vdev->err_trigger);
-               vdev->err_trigger = efdctx;
+               if (*ctx)
+                       eventfd_ctx_put(*ctx);
+               *ctx = efdctx;
                return 0;
        } else
                return -EINVAL;
 }
+
+static int vfio_pci_set_err_trigger(struct vfio_pci_device *vdev,
+                                   unsigned index, unsigned start,
+                                   unsigned count, uint32_t flags, void *data)
+{
+       if (index != VFIO_PCI_ERR_IRQ_INDEX)
+               return -EINVAL;
+
+       /*
+        * We should sanitize start & count, but that wasn't caught
+        * originally, so this IRQ index must forever ignore them :-(
+        */
+
+       return vfio_pci_set_ctx_trigger_single(&vdev->err_trigger, flags, data);
+}
+
+static int vfio_pci_set_req_trigger(struct vfio_pci_device *vdev,
+                                   unsigned index, unsigned start,
+                                   unsigned count, uint32_t flags, void *data)
+{
+       if (index != VFIO_PCI_REQ_IRQ_INDEX || start != 0 || count != 1)
+               return -EINVAL;
+
+       return vfio_pci_set_ctx_trigger_single(&vdev->req_trigger, flags, data);
+}
+
 int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
                            unsigned index, unsigned start, unsigned count,
                            void *data)
@@ -844,6 +868,12 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags,
                                func = vfio_pci_set_err_trigger;
                        break;
                }
+       case VFIO_PCI_REQ_IRQ_INDEX:
+               switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
+               case VFIO_IRQ_SET_ACTION_TRIGGER:
+                       func = vfio_pci_set_req_trigger;
+                       break;
+               }
        }
 
        if (!func)
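[Note] The vfio-pci pieces above and the vfio core changes below add a "device request" channel: while vfio_del_group_dev() waits for a busy device to be released, it periodically invokes the driver's new .request callback, and vfio_pci_request() forwards that to an eventfd which userspace registered via VFIO_DEVICE_SET_IRQS on the new VFIO_PCI_REQ_IRQ_INDEX. The fragment below is only a userspace sketch of how such an eventfd could be armed and serviced (error handling omitted; device_fd is assumed to be an already-open VFIO device file descriptor):

/* Userspace sketch (not from the patch): arm the REQ eventfd, then wait for
 * the kernel to ask for the device back and release it.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int arm_req_eventfd(int device_fd)
{
	struct vfio_irq_set *irq_set;
	size_t argsz = sizeof(*irq_set) + sizeof(int32_t);
	int efd = eventfd(0, EFD_CLOEXEC);

	irq_set = calloc(1, argsz);
	irq_set->argsz = argsz;
	irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = VFIO_PCI_REQ_IRQ_INDEX;
	irq_set->start = 0;
	irq_set->count = 1;
	memcpy(irq_set->data, &efd, sizeof(int32_t));

	ioctl(device_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	free(irq_set);
	return efd;
}

static void handle_device_request(int efd, int device_fd)
{
	uint64_t count;

	/* vfio_pci_request() signals this eventfd when the kernel wants the device. */
	read(efd, &count, sizeof(count));
	close(device_fd);	/* releasing the device lets the unbind complete */
}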
index 671c17a6e6d029dfdffe5d7243150ed757e44cf4..c9f9b323f152733685f9dafc67793472921658ec 100644 (file)
@@ -58,6 +58,7 @@ struct vfio_pci_device {
        struct pci_saved_state  *pci_saved_state;
        int                     refcnt;
        struct eventfd_ctx      *err_trigger;
+       struct eventfd_ctx      *req_trigger;
 };
 
 #define is_intx(vdev) (vdev->irq_type == VFIO_PCI_INTX_IRQ_INDEX)
index f018d8d0f975360a091339699d12348c5c93a12b..4cde8550144406c02715448b8fbc4a88b538189b 100644 (file)
@@ -63,6 +63,11 @@ struct vfio_container {
        void                            *iommu_data;
 };
 
+struct vfio_unbound_dev {
+       struct device                   *dev;
+       struct list_head                unbound_next;
+};
+
 struct vfio_group {
        struct kref                     kref;
        int                             minor;
@@ -75,6 +80,8 @@ struct vfio_group {
        struct notifier_block           nb;
        struct list_head                vfio_next;
        struct list_head                container_next;
+       struct list_head                unbound_list;
+       struct mutex                    unbound_lock;
        atomic_t                        opened;
 };
 
@@ -204,6 +211,8 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
        kref_init(&group->kref);
        INIT_LIST_HEAD(&group->device_list);
        mutex_init(&group->device_lock);
+       INIT_LIST_HEAD(&group->unbound_list);
+       mutex_init(&group->unbound_lock);
        atomic_set(&group->container_users, 0);
        atomic_set(&group->opened, 0);
        group->iommu_group = iommu_group;
@@ -264,13 +273,22 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group)
 static void vfio_group_release(struct kref *kref)
 {
        struct vfio_group *group = container_of(kref, struct vfio_group, kref);
+       struct vfio_unbound_dev *unbound, *tmp;
+       struct iommu_group *iommu_group = group->iommu_group;
 
        WARN_ON(!list_empty(&group->device_list));
 
+       list_for_each_entry_safe(unbound, tmp,
+                                &group->unbound_list, unbound_next) {
+               list_del(&unbound->unbound_next);
+               kfree(unbound);
+       }
+
        device_destroy(vfio.class, MKDEV(MAJOR(vfio.group_devt), group->minor));
        list_del(&group->vfio_next);
        vfio_free_group_minor(group->minor);
        vfio_group_unlock_and_free(group);
+       iommu_group_put(iommu_group);
 }
 
 static void vfio_group_put(struct vfio_group *group)
@@ -440,17 +458,36 @@ static bool vfio_whitelisted_driver(struct device_driver *drv)
 }
 
 /*
- * A vfio group is viable for use by userspace if all devices are either
- * driver-less or bound to a vfio or whitelisted driver.  We test the
- * latter by the existence of a struct vfio_device matching the dev.
+ * A vfio group is viable for use by userspace if all devices are in
+ * one of the following states:
+ *  - driver-less
+ *  - bound to a vfio driver
+ *  - bound to a whitelisted driver
+ *
+ * We use two methods to determine whether a device is bound to a vfio
+ * driver.  The first is to test whether the device exists in the vfio
+ * group.  The second is to test if the device exists on the group
+ * unbound_list, indicating it's in the middle of transitioning from
+ * a vfio driver to driver-less.
  */
 static int vfio_dev_viable(struct device *dev, void *data)
 {
        struct vfio_group *group = data;
        struct vfio_device *device;
        struct device_driver *drv = ACCESS_ONCE(dev->driver);
+       struct vfio_unbound_dev *unbound;
+       int ret = -EINVAL;
 
-       if (!drv || vfio_whitelisted_driver(drv))
+       mutex_lock(&group->unbound_lock);
+       list_for_each_entry(unbound, &group->unbound_list, unbound_next) {
+               if (dev == unbound->dev) {
+                       ret = 0;
+                       break;
+               }
+       }
+       mutex_unlock(&group->unbound_lock);
+
+       if (!ret || !drv || vfio_whitelisted_driver(drv))
                return 0;
 
        device = vfio_group_get_device(group, dev);
@@ -459,7 +496,7 @@ static int vfio_dev_viable(struct device *dev, void *data)
                return 0;
        }
 
-       return -EINVAL;
+       return ret;
 }
 
 /**
@@ -501,6 +538,7 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
 {
        struct vfio_group *group = container_of(nb, struct vfio_group, nb);
        struct device *dev = data;
+       struct vfio_unbound_dev *unbound;
 
        /*
         * Need to go through a group_lock lookup to get a reference or we
@@ -550,6 +588,17 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb,
                 * stop the system to maintain isolation.  At a minimum, we'd
                 * want a toggle to disable driver auto probe for this device.
                 */
+
+               mutex_lock(&group->unbound_lock);
+               list_for_each_entry(unbound,
+                                   &group->unbound_list, unbound_next) {
+                       if (dev == unbound->dev) {
+                               list_del(&unbound->unbound_next);
+                               kfree(unbound);
+                               break;
+                       }
+               }
+               mutex_unlock(&group->unbound_lock);
                break;
        }
 
@@ -578,6 +627,12 @@ int vfio_add_group_dev(struct device *dev,
                        iommu_group_put(iommu_group);
                        return PTR_ERR(group);
                }
+       } else {
+               /*
+                * A found vfio_group already holds a reference to the
+                * iommu_group.  A created vfio_group keeps the reference.
+                */
+               iommu_group_put(iommu_group);
        }
 
        device = vfio_group_get_device(group, dev);
@@ -586,21 +641,19 @@ int vfio_add_group_dev(struct device *dev,
                     dev_name(dev), iommu_group_id(iommu_group));
                vfio_device_put(device);
                vfio_group_put(group);
-               iommu_group_put(iommu_group);
                return -EBUSY;
        }
 
        device = vfio_group_create_device(group, dev, ops, device_data);
        if (IS_ERR(device)) {
                vfio_group_put(group);
-               iommu_group_put(iommu_group);
                return PTR_ERR(device);
        }
 
        /*
-        * Added device holds reference to iommu_group and vfio_device
-        * (which in turn holds reference to vfio_group).  Drop extra
-        * group reference used while acquiring device.
+        * Drop all but the vfio_device reference.  The vfio_device holds
+        * a reference to the vfio_group, which holds a reference to the
+        * iommu_group.
         */
        vfio_group_put(group);
 
@@ -655,8 +708,9 @@ void *vfio_del_group_dev(struct device *dev)
 {
        struct vfio_device *device = dev_get_drvdata(dev);
        struct vfio_group *group = device->group;
-       struct iommu_group *iommu_group = group->iommu_group;
        void *device_data = device->device_data;
+       struct vfio_unbound_dev *unbound;
+       unsigned int i = 0;
 
        /*
         * The group exists so long as we have a device reference.  Get
@@ -664,14 +718,49 @@ void *vfio_del_group_dev(struct device *dev)
         */
        vfio_group_get(group);
 
+       /*
+        * When the device is removed from the group, the group suddenly
+        * becomes non-viable; the device has a driver (until the unbind
+        * completes), but it's not present in the group.  This is bad news
+        * for any external users that need to re-acquire a group reference
+        * in order to match and release their existing reference.  To
+        * solve this, we track such devices on the unbound_list to bridge
+        * the gap until they're fully unbound.
+        */
+       unbound = kzalloc(sizeof(*unbound), GFP_KERNEL);
+       if (unbound) {
+               unbound->dev = dev;
+               mutex_lock(&group->unbound_lock);
+               list_add(&unbound->unbound_next, &group->unbound_list);
+               mutex_unlock(&group->unbound_lock);
+       }
+       WARN_ON(!unbound);
+
        vfio_device_put(device);
 
-       /* TODO send a signal to encourage this to be released */
-       wait_event(vfio.release_q, !vfio_dev_present(group, dev));
+       /*
+        * If the device is still present in the group after the above
+        * 'put', then it is in use and we need to request it from the
+        * bus driver.  The driver may in turn need to request the
+        * device from the user.  We send the request on an arbitrary
+        * interval with counter to allow the driver to take escalating
+        * measures to release the device if it has the ability to do so.
+        */
+       do {
+               device = vfio_group_get_device(group, dev);
+               if (!device)
+                       break;
 
-       vfio_group_put(group);
+               if (device->ops->request)
+                       device->ops->request(device_data, i++);
 
-       iommu_group_put(iommu_group);
+               vfio_device_put(device);
+
+       } while (wait_event_interruptible_timeout(vfio.release_q,
+                                                 !vfio_dev_present(group, dev),
+                                                 HZ * 10) <= 0);
+
+       vfio_group_put(group);
 
        return device_data;
 }
index 4a9d666f1e9186ed07071c0d909ba167e814998a..57d8c37a002b0e0b8d49891536b8ddfab64afc6b 100644 (file)
@@ -66,6 +66,7 @@ struct vfio_domain {
        struct list_head        next;
        struct list_head        group_list;
        int                     prot;           /* IOMMU_CACHE */
+       bool                    fgsp;           /* Fine-grained super pages */
 };
 
 struct vfio_dma {
@@ -264,6 +265,7 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
        unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        bool lock_cap = capable(CAP_IPC_LOCK);
        long ret, i;
+       bool rsvd;
 
        if (!current->mm)
                return -ENODEV;
@@ -272,10 +274,9 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
        if (ret)
                return ret;
 
-       if (is_invalid_reserved_pfn(*pfn_base))
-               return 1;
+       rsvd = is_invalid_reserved_pfn(*pfn_base);
 
-       if (!lock_cap && current->mm->locked_vm + 1 > limit) {
+       if (!rsvd && !lock_cap && current->mm->locked_vm + 1 > limit) {
                put_pfn(*pfn_base, prot);
                pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
                        limit << PAGE_SHIFT);
@@ -283,7 +284,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
        }
 
        if (unlikely(disable_hugepages)) {
-               vfio_lock_acct(1);
+               if (!rsvd)
+                       vfio_lock_acct(1);
                return 1;
        }
 
@@ -295,12 +297,14 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
                if (ret)
                        break;
 
-               if (pfn != *pfn_base + i || is_invalid_reserved_pfn(pfn)) {
+               if (pfn != *pfn_base + i ||
+                   rsvd != is_invalid_reserved_pfn(pfn)) {
                        put_pfn(pfn, prot);
                        break;
                }
 
-               if (!lock_cap && current->mm->locked_vm + i + 1 > limit) {
+               if (!rsvd && !lock_cap &&
+                   current->mm->locked_vm + i + 1 > limit) {
                        put_pfn(pfn, prot);
                        pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
                                __func__, limit << PAGE_SHIFT);
@@ -308,7 +312,8 @@ static long vfio_pin_pages(unsigned long vaddr, long npage,
                }
        }
 
-       vfio_lock_acct(i);
+       if (!rsvd)
+               vfio_lock_acct(i);
 
        return i;
 }
@@ -346,12 +351,14 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
        domain = d = list_first_entry(&iommu->domain_list,
                                      struct vfio_domain, next);
 
-       list_for_each_entry_continue(d, &iommu->domain_list, next)
+       list_for_each_entry_continue(d, &iommu->domain_list, next) {
                iommu_unmap(d->domain, dma->iova, dma->size);
+               cond_resched();
+       }
 
        while (iova < end) {
-               size_t unmapped;
-               phys_addr_t phys;
+               size_t unmapped, len;
+               phys_addr_t phys, next;
 
                phys = iommu_iova_to_phys(domain->domain, iova);
                if (WARN_ON(!phys)) {
@@ -359,7 +366,19 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
                        continue;
                }
 
-               unmapped = iommu_unmap(domain->domain, iova, PAGE_SIZE);
+               /*
+                * To optimize for fewer iommu_unmap() calls, each of which
+                * may require hardware cache flushing, try to find the
+                * largest contiguous physical memory chunk to unmap.
+                */
+               for (len = PAGE_SIZE;
+                    !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
+                       next = iommu_iova_to_phys(domain->domain, iova + len);
+                       if (next != phys + len)
+                               break;
+               }
+
+               unmapped = iommu_unmap(domain->domain, iova, len);
                if (WARN_ON(!unmapped))
                        break;
 
@@ -367,6 +386,8 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
                                             unmapped >> PAGE_SHIFT,
                                             dma->prot, false);
                iova += unmapped;
+
+               cond_resched();
        }
 
        vfio_lock_acct(-unlocked);
@@ -511,6 +532,8 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
                            map_try_harder(d, iova, pfn, npage, prot))
                                goto unwind;
                }
+
+               cond_resched();
        }
 
        return 0;
@@ -665,6 +688,39 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
        return 0;
 }
 
+/*
+ * We change our unmap behavior slightly depending on whether the IOMMU
+ * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
+ * for practically any contiguous power-of-two mapping we give it.  This means
+ * we don't need to look for contiguous chunks ourselves to make unmapping
+ * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
+ * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
+ * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
+ * hugetlbfs is in use.
+ */
+static void vfio_test_domain_fgsp(struct vfio_domain *domain)
+{
+       struct page *pages;
+       int ret, order = get_order(PAGE_SIZE * 2);
+
+       pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
+       if (!pages)
+               return;
+
+       ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
+                       IOMMU_READ | IOMMU_WRITE | domain->prot);
+       if (!ret) {
+               size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);
+
+               if (unmapped == PAGE_SIZE)
+                       iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
+               else
+                       domain->fgsp = true;
+       }
+
+       __free_pages(pages, order);
+}
+
 static int vfio_iommu_type1_attach_group(void *iommu_data,
                                         struct iommu_group *iommu_group)
 {
@@ -758,6 +814,8 @@ static int vfio_iommu_type1_attach_group(void *iommu_data,
                }
        }
 
+       vfio_test_domain_fgsp(domain);
+
        /* replay mappings on new domains */
        ret = vfio_iommu_replay(iommu, domain);
        if (ret)
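
The fgsp probe above works by inference rather than by querying a capability: it maps two physically contiguous pages, asks the IOMMU to unmap only the first page, and treats "more than one page came back" as evidence that the driver backed the two-page mapping with a single superpage, i.e. fine-grained superpage support. A rough standalone sketch of that inference (plain C; the stubbed unmap and its return values are invented for illustration and stand in for iommu_unmap()):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL

/*
 * Stand-in for the driver's unmap: a fine-grained-superpage IOMMU backs the
 * two-page test mapping with one superpage, so a one-page unmap request
 * tears down both pages and reports 2 * PAGE_SIZE.
 */
static size_t stub_unmap_first_page(bool backs_with_superpage)
{
	return backs_with_superpage ? 2 * PAGE_SIZE : PAGE_SIZE;
}

int main(void)
{
	size_t unmapped = stub_unmap_first_page(true);
	bool fgsp = (unmapped != PAGE_SIZE);

	printf("unmapped %zu bytes -> fgsp = %s\n",
	       unmapped, fgsp ? "true" : "false");
	return 0;
}
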
index dc78d87e0fc2c5e14832adf674899823ac223c5f..8d4f3f1ff799fb1d7b4bb5a2184144b515676a2c 100644 (file)
@@ -38,7 +38,6 @@
 #include <linux/miscdevice.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
-#include <scsi/scsi_tcq.h>
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
 
 #include "vhost.h"
 
-#define TCM_VHOST_VERSION  "v0.1"
-#define TCM_VHOST_NAMELEN 256
-#define TCM_VHOST_MAX_CDB_SIZE 32
-#define TCM_VHOST_DEFAULT_TAGS 256
-#define TCM_VHOST_PREALLOC_SGLS 2048
-#define TCM_VHOST_PREALLOC_UPAGES 2048
-#define TCM_VHOST_PREALLOC_PROT_SGLS 512
+#define VHOST_SCSI_VERSION  "v0.1"
+#define VHOST_SCSI_NAMELEN 256
+#define VHOST_SCSI_MAX_CDB_SIZE 32
+#define VHOST_SCSI_DEFAULT_TAGS 256
+#define VHOST_SCSI_PREALLOC_SGLS 2048
+#define VHOST_SCSI_PREALLOC_UPAGES 2048
+#define VHOST_SCSI_PREALLOC_PROT_SGLS 512
 
 struct vhost_scsi_inflight {
        /* Wait for the flush operation to finish */
@@ -67,11 +66,13 @@ struct vhost_scsi_inflight {
        struct kref kref;
 };
 
-struct tcm_vhost_cmd {
+struct vhost_scsi_cmd {
        /* Descriptor from vhost_get_vq_desc() for virt_queue segment */
        int tvc_vq_desc;
        /* virtio-scsi initiator task attribute */
        int tvc_task_attr;
+       /* virtio-scsi response incoming iovecs */
+       int tvc_in_iovs;
        /* virtio-scsi initiator data direction */
        enum dma_data_direction tvc_data_direction;
        /* Expected data transfer length from virtio-scsi header */
@@ -81,26 +82,26 @@ struct tcm_vhost_cmd {
        /* The number of scatterlists associated with this cmd */
        u32 tvc_sgl_count;
        u32 tvc_prot_sgl_count;
-       /* Saved unpacked SCSI LUN for tcm_vhost_submission_work() */
+       /* Saved unpacked SCSI LUN for vhost_scsi_submission_work() */
        u32 tvc_lun;
        /* Pointer to the SGL formatted memory from virtio-scsi */
        struct scatterlist *tvc_sgl;
        struct scatterlist *tvc_prot_sgl;
        struct page **tvc_upages;
-       /* Pointer to response */
-       struct virtio_scsi_cmd_resp __user *tvc_resp;
+       /* Pointer to response header iovec */
+       struct iovec *tvc_resp_iov;
        /* Pointer to vhost_scsi for our device */
        struct vhost_scsi *tvc_vhost;
        /* Pointer to vhost_virtqueue for the cmd */
        struct vhost_virtqueue *tvc_vq;
        /* Pointer to vhost nexus memory */
-       struct tcm_vhost_nexus *tvc_nexus;
+       struct vhost_scsi_nexus *tvc_nexus;
        /* The TCM I/O descriptor that is accessed via container_of() */
        struct se_cmd tvc_se_cmd;
-       /* work item used for cmwq dispatch to tcm_vhost_submission_work() */
+       /* work item used for cmwq dispatch to vhost_scsi_submission_work() */
        struct work_struct work;
        /* Copy of the incoming SCSI command descriptor block (CDB) */
-       unsigned char tvc_cdb[TCM_VHOST_MAX_CDB_SIZE];
+       unsigned char tvc_cdb[VHOST_SCSI_MAX_CDB_SIZE];
        /* Sense buffer that will be mapped into outgoing status */
        unsigned char tvc_sense_buf[TRANSPORT_SENSE_BUFFER];
        /* Completed commands list, serviced from vhost worker thread */
@@ -109,53 +110,53 @@ struct tcm_vhost_cmd {
        struct vhost_scsi_inflight *inflight;
 };
 
-struct tcm_vhost_nexus {
+struct vhost_scsi_nexus {
        /* Pointer to TCM session for I_T Nexus */
        struct se_session *tvn_se_sess;
 };
 
-struct tcm_vhost_nacl {
+struct vhost_scsi_nacl {
        /* Binary World Wide unique Port Name for Vhost Initiator port */
        u64 iport_wwpn;
        /* ASCII formatted WWPN for SAS Initiator port */
-       char iport_name[TCM_VHOST_NAMELEN];
-       /* Returned by tcm_vhost_make_nodeacl() */
+       char iport_name[VHOST_SCSI_NAMELEN];
+       /* Returned by vhost_scsi_make_nodeacl() */
        struct se_node_acl se_node_acl;
 };
 
-struct tcm_vhost_tpg {
+struct vhost_scsi_tpg {
        /* Vhost port target portal group tag for TCM */
        u16 tport_tpgt;
        /* Used to track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
        int tv_tpg_port_count;
        /* Used for vhost_scsi device reference to tpg_nexus, protected by tv_tpg_mutex */
        int tv_tpg_vhost_count;
-       /* list for tcm_vhost_list */
+       /* list for vhost_scsi_list */
        struct list_head tv_tpg_list;
        /* Used to protect access for tpg_nexus */
        struct mutex tv_tpg_mutex;
        /* Pointer to the TCM VHost I_T Nexus for this TPG endpoint */
-       struct tcm_vhost_nexus *tpg_nexus;
-       /* Pointer back to tcm_vhost_tport */
-       struct tcm_vhost_tport *tport;
-       /* Returned by tcm_vhost_make_tpg() */
+       struct vhost_scsi_nexus *tpg_nexus;
+       /* Pointer back to vhost_scsi_tport */
+       struct vhost_scsi_tport *tport;
+       /* Returned by vhost_scsi_make_tpg() */
        struct se_portal_group se_tpg;
        /* Pointer back to vhost_scsi, protected by tv_tpg_mutex */
        struct vhost_scsi *vhost_scsi;
 };
 
-struct tcm_vhost_tport {
+struct vhost_scsi_tport {
        /* SCSI protocol the tport is providing */
        u8 tport_proto_id;
        /* Binary World Wide unique Port Name for Vhost Target port */
        u64 tport_wwpn;
        /* ASCII formatted WWPN for Vhost Target port */
-       char tport_name[TCM_VHOST_NAMELEN];
-       /* Returned by tcm_vhost_make_tport() */
+       char tport_name[VHOST_SCSI_NAMELEN];
+       /* Returned by vhost_scsi_make_tport() */
        struct se_wwn tport_wwn;
 };
 
-struct tcm_vhost_evt {
+struct vhost_scsi_evt {
        /* event to be sent to guest */
        struct virtio_scsi_event event;
        /* event list, serviced from vhost worker thread */
@@ -171,7 +172,9 @@ enum {
 /* Note: can't set VIRTIO_F_VERSION_1 yet, since that implies ANY_LAYOUT. */
 enum {
        VHOST_SCSI_FEATURES = VHOST_FEATURES | (1ULL << VIRTIO_SCSI_F_HOTPLUG) |
-                                              (1ULL << VIRTIO_SCSI_F_T10_PI)
+                                              (1ULL << VIRTIO_SCSI_F_T10_PI) |
+                                              (1ULL << VIRTIO_F_ANY_LAYOUT) |
+                                              (1ULL << VIRTIO_F_VERSION_1)
 };
 
 #define VHOST_SCSI_MAX_TARGET  256
@@ -195,7 +198,7 @@ struct vhost_scsi_virtqueue {
 
 struct vhost_scsi {
        /* Protected by vhost_scsi->dev.mutex */
-       struct tcm_vhost_tpg **vs_tpg;
+       struct vhost_scsi_tpg **vs_tpg;
        char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
 
        struct vhost_dev dev;
@@ -212,21 +215,21 @@ struct vhost_scsi {
 };
 
 /* Local pointer to allocated TCM configfs fabric module */
-static struct target_fabric_configfs *tcm_vhost_fabric_configfs;
+static struct target_fabric_configfs *vhost_scsi_fabric_configfs;
 
-static struct workqueue_struct *tcm_vhost_workqueue;
+static struct workqueue_struct *vhost_scsi_workqueue;
 
-/* Global spinlock to protect tcm_vhost TPG list for vhost IOCTL access */
-static DEFINE_MUTEX(tcm_vhost_mutex);
-static LIST_HEAD(tcm_vhost_list);
+/* Global mutex to protect vhost_scsi TPG list for vhost IOCTL access */
+static DEFINE_MUTEX(vhost_scsi_mutex);
+static LIST_HEAD(vhost_scsi_list);
 
-static int iov_num_pages(struct iovec *iov)
+static int iov_num_pages(void __user *iov_base, size_t iov_len)
 {
-       return (PAGE_ALIGN((unsigned long)iov->iov_base + iov->iov_len) -
-              ((unsigned long)iov->iov_base & PAGE_MASK)) >> PAGE_SHIFT;
+       return (PAGE_ALIGN((unsigned long)iov_base + iov_len) -
+              ((unsigned long)iov_base & PAGE_MASK)) >> PAGE_SHIFT;
 }
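
The reworked iov_num_pages() above takes a raw user base pointer and length; the page count falls out of rounding the end up and the start down to page boundaries. A quick standalone check of that arithmetic (plain C; the PAGE_* macros are redefined locally and the addresses are only examples):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

static unsigned long num_pages(unsigned long base, unsigned long len)
{
	return (PAGE_ALIGN(base + len) - (base & PAGE_MASK)) >> PAGE_SHIFT;
}

int main(void)
{
	/* 0x20 bytes starting 0x10 short of a page boundary span two pages. */
	printf("%lu\n", num_pages(0x1ff0, 0x20));   /* 2 */
	/* A page-aligned, page-sized buffer spans exactly one page. */
	printf("%lu\n", num_pages(0x2000, 0x1000)); /* 1 */
	return 0;
}
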
 
-static void tcm_vhost_done_inflight(struct kref *kref)
+static void vhost_scsi_done_inflight(struct kref *kref)
 {
        struct vhost_scsi_inflight *inflight;
 
@@ -234,7 +237,7 @@ static void tcm_vhost_done_inflight(struct kref *kref)
        complete(&inflight->comp);
 }
 
-static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
+static void vhost_scsi_init_inflight(struct vhost_scsi *vs,
                                    struct vhost_scsi_inflight *old_inflight[])
 {
        struct vhost_scsi_inflight *new_inflight;
@@ -262,7 +265,7 @@ static void tcm_vhost_init_inflight(struct vhost_scsi *vs,
 }
 
 static struct vhost_scsi_inflight *
-tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
+vhost_scsi_get_inflight(struct vhost_virtqueue *vq)
 {
        struct vhost_scsi_inflight *inflight;
        struct vhost_scsi_virtqueue *svq;
@@ -274,31 +277,31 @@ tcm_vhost_get_inflight(struct vhost_virtqueue *vq)
        return inflight;
 }
 
-static void tcm_vhost_put_inflight(struct vhost_scsi_inflight *inflight)
+static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight)
 {
-       kref_put(&inflight->kref, tcm_vhost_done_inflight);
+       kref_put(&inflight->kref, vhost_scsi_done_inflight);
 }
 
-static int tcm_vhost_check_true(struct se_portal_group *se_tpg)
+static int vhost_scsi_check_true(struct se_portal_group *se_tpg)
 {
        return 1;
 }
 
-static int tcm_vhost_check_false(struct se_portal_group *se_tpg)
+static int vhost_scsi_check_false(struct se_portal_group *se_tpg)
 {
        return 0;
 }
 
-static char *tcm_vhost_get_fabric_name(void)
+static char *vhost_scsi_get_fabric_name(void)
 {
        return "vhost";
 }
 
-static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+static u8 vhost_scsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport = tpg->tport;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport = tpg->tport;
 
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
@@ -316,37 +319,37 @@ static u8 tcm_vhost_get_fabric_proto_ident(struct se_portal_group *se_tpg)
        return sas_get_fabric_proto_ident(se_tpg);
 }
 
-static char *tcm_vhost_get_fabric_wwn(struct se_portal_group *se_tpg)
+static char *vhost_scsi_get_fabric_wwn(struct se_portal_group *se_tpg)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport = tpg->tport;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport = tpg->tport;
 
        return &tport->tport_name[0];
 }
 
-static u16 tcm_vhost_get_tag(struct se_portal_group *se_tpg)
+static u16 vhost_scsi_get_tpgt(struct se_portal_group *se_tpg)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
        return tpg->tport_tpgt;
 }
 
-static u32 tcm_vhost_get_default_depth(struct se_portal_group *se_tpg)
+static u32 vhost_scsi_get_default_depth(struct se_portal_group *se_tpg)
 {
        return 1;
 }
 
 static u32
-tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
+vhost_scsi_get_pr_transport_id(struct se_portal_group *se_tpg,
                              struct se_node_acl *se_nacl,
                              struct t10_pr_registration *pr_reg,
                              int *format_code,
                              unsigned char *buf)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport = tpg->tport;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport = tpg->tport;
 
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
@@ -369,14 +372,14 @@ tcm_vhost_get_pr_transport_id(struct se_portal_group *se_tpg,
 }
 
 static u32
-tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
+vhost_scsi_get_pr_transport_id_len(struct se_portal_group *se_tpg,
                                  struct se_node_acl *se_nacl,
                                  struct t10_pr_registration *pr_reg,
                                  int *format_code)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport = tpg->tport;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport = tpg->tport;
 
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
@@ -399,14 +402,14 @@ tcm_vhost_get_pr_transport_id_len(struct se_portal_group *se_tpg,
 }
 
 static char *
-tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
+vhost_scsi_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
                                    const char *buf,
                                    u32 *out_tid_len,
                                    char **port_nexus_ptr)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport = tpg->tport;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport = tpg->tport;
 
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
@@ -429,13 +432,13 @@ tcm_vhost_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
 }
 
 static struct se_node_acl *
-tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
+vhost_scsi_alloc_fabric_acl(struct se_portal_group *se_tpg)
 {
-       struct tcm_vhost_nacl *nacl;
+       struct vhost_scsi_nacl *nacl;
 
-       nacl = kzalloc(sizeof(struct tcm_vhost_nacl), GFP_KERNEL);
+       nacl = kzalloc(sizeof(struct vhost_scsi_nacl), GFP_KERNEL);
        if (!nacl) {
-               pr_err("Unable to allocate struct tcm_vhost_nacl\n");
+               pr_err("Unable to allocate struct vhost_scsi_nacl\n");
                return NULL;
        }
 
@@ -443,24 +446,24 @@ tcm_vhost_alloc_fabric_acl(struct se_portal_group *se_tpg)
 }
 
 static void
-tcm_vhost_release_fabric_acl(struct se_portal_group *se_tpg,
+vhost_scsi_release_fabric_acl(struct se_portal_group *se_tpg,
                             struct se_node_acl *se_nacl)
 {
-       struct tcm_vhost_nacl *nacl = container_of(se_nacl,
-                       struct tcm_vhost_nacl, se_node_acl);
+       struct vhost_scsi_nacl *nacl = container_of(se_nacl,
+                       struct vhost_scsi_nacl, se_node_acl);
        kfree(nacl);
 }
 
-static u32 tcm_vhost_tpg_get_inst_index(struct se_portal_group *se_tpg)
+static u32 vhost_scsi_tpg_get_inst_index(struct se_portal_group *se_tpg)
 {
        return 1;
 }
 
-static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
+static void vhost_scsi_release_cmd(struct se_cmd *se_cmd)
 {
-       struct tcm_vhost_cmd *tv_cmd = container_of(se_cmd,
-                               struct tcm_vhost_cmd, tvc_se_cmd);
-       struct se_session *se_sess = se_cmd->se_sess;
+       struct vhost_scsi_cmd *tv_cmd = container_of(se_cmd,
+                               struct vhost_scsi_cmd, tvc_se_cmd);
+       struct se_session *se_sess = tv_cmd->tvc_nexus->tvn_se_sess;
        int i;
 
        if (tv_cmd->tvc_sgl_count) {
@@ -472,53 +475,53 @@ static void tcm_vhost_release_cmd(struct se_cmd *se_cmd)
                        put_page(sg_page(&tv_cmd->tvc_prot_sgl[i]));
        }
 
-       tcm_vhost_put_inflight(tv_cmd->inflight);
+       vhost_scsi_put_inflight(tv_cmd->inflight);
        percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
 
-static int tcm_vhost_shutdown_session(struct se_session *se_sess)
+static int vhost_scsi_shutdown_session(struct se_session *se_sess)
 {
        return 0;
 }
 
-static void tcm_vhost_close_session(struct se_session *se_sess)
+static void vhost_scsi_close_session(struct se_session *se_sess)
 {
        return;
 }
 
-static u32 tcm_vhost_sess_get_index(struct se_session *se_sess)
+static u32 vhost_scsi_sess_get_index(struct se_session *se_sess)
 {
        return 0;
 }
 
-static int tcm_vhost_write_pending(struct se_cmd *se_cmd)
+static int vhost_scsi_write_pending(struct se_cmd *se_cmd)
 {
        /* Go ahead and process the write immediately */
        target_execute_cmd(se_cmd);
        return 0;
 }
 
-static int tcm_vhost_write_pending_status(struct se_cmd *se_cmd)
+static int vhost_scsi_write_pending_status(struct se_cmd *se_cmd)
 {
        return 0;
 }
 
-static void tcm_vhost_set_default_node_attrs(struct se_node_acl *nacl)
+static void vhost_scsi_set_default_node_attrs(struct se_node_acl *nacl)
 {
        return;
 }
 
-static u32 tcm_vhost_get_task_tag(struct se_cmd *se_cmd)
+static u32 vhost_scsi_get_task_tag(struct se_cmd *se_cmd)
 {
        return 0;
 }
 
-static int tcm_vhost_get_cmd_state(struct se_cmd *se_cmd)
+static int vhost_scsi_get_cmd_state(struct se_cmd *se_cmd)
 {
        return 0;
 }
 
-static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
+static void vhost_scsi_complete_cmd(struct vhost_scsi_cmd *cmd)
 {
        struct vhost_scsi *vs = cmd->tvc_vhost;
 
@@ -527,44 +530,44 @@ static void vhost_scsi_complete_cmd(struct tcm_vhost_cmd *cmd)
        vhost_work_queue(&vs->dev, &vs->vs_completion_work);
 }
 
-static int tcm_vhost_queue_data_in(struct se_cmd *se_cmd)
+static int vhost_scsi_queue_data_in(struct se_cmd *se_cmd)
 {
-       struct tcm_vhost_cmd *cmd = container_of(se_cmd,
-                               struct tcm_vhost_cmd, tvc_se_cmd);
+       struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+                               struct vhost_scsi_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
 }
 
-static int tcm_vhost_queue_status(struct se_cmd *se_cmd)
+static int vhost_scsi_queue_status(struct se_cmd *se_cmd)
 {
-       struct tcm_vhost_cmd *cmd = container_of(se_cmd,
-                               struct tcm_vhost_cmd, tvc_se_cmd);
+       struct vhost_scsi_cmd *cmd = container_of(se_cmd,
+                               struct vhost_scsi_cmd, tvc_se_cmd);
        vhost_scsi_complete_cmd(cmd);
        return 0;
 }
 
-static void tcm_vhost_queue_tm_rsp(struct se_cmd *se_cmd)
+static void vhost_scsi_queue_tm_rsp(struct se_cmd *se_cmd)
 {
        return;
 }
 
-static void tcm_vhost_aborted_task(struct se_cmd *se_cmd)
+static void vhost_scsi_aborted_task(struct se_cmd *se_cmd)
 {
        return;
 }
 
-static void tcm_vhost_free_evt(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+static void vhost_scsi_free_evt(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 {
        vs->vs_events_nr--;
        kfree(evt);
 }
 
-static struct tcm_vhost_evt *
-tcm_vhost_allocate_evt(struct vhost_scsi *vs,
+static struct vhost_scsi_evt *
+vhost_scsi_allocate_evt(struct vhost_scsi *vs,
                       u32 event, u32 reason)
 {
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
-       struct tcm_vhost_evt *evt;
+       struct vhost_scsi_evt *evt;
 
        if (vs->vs_events_nr > VHOST_SCSI_MAX_EVENT) {
                vs->vs_events_missed = true;
@@ -573,7 +576,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
 
        evt = kzalloc(sizeof(*evt), GFP_KERNEL);
        if (!evt) {
-               vq_err(vq, "Failed to allocate tcm_vhost_evt\n");
+               vq_err(vq, "Failed to allocate vhost_scsi_evt\n");
                vs->vs_events_missed = true;
                return NULL;
        }
@@ -585,7 +588,7 @@ tcm_vhost_allocate_evt(struct vhost_scsi *vs,
        return evt;
 }
 
-static void vhost_scsi_free_cmd(struct tcm_vhost_cmd *cmd)
+static void vhost_scsi_free_cmd(struct vhost_scsi_cmd *cmd)
 {
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
 
@@ -600,7 +603,7 @@ static int vhost_scsi_check_stop_free(struct se_cmd *se_cmd)
 }
 
 static void
-tcm_vhost_do_evt_work(struct vhost_scsi *vs, struct tcm_vhost_evt *evt)
+vhost_scsi_do_evt_work(struct vhost_scsi *vs, struct vhost_scsi_evt *evt)
 {
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        struct virtio_scsi_event *event = &evt->event;
@@ -646,24 +649,24 @@ again:
        if (!ret)
                vhost_add_used_and_signal(&vs->dev, vq, head, 0);
        else
-               vq_err(vq, "Faulted on tcm_vhost_send_event\n");
+               vq_err(vq, "Faulted on vhost_scsi_send_event\n");
 }
 
-static void tcm_vhost_evt_work(struct vhost_work *work)
+static void vhost_scsi_evt_work(struct vhost_work *work)
 {
        struct vhost_scsi *vs = container_of(work, struct vhost_scsi,
                                        vs_event_work);
        struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
-       struct tcm_vhost_evt *evt;
+       struct vhost_scsi_evt *evt;
        struct llist_node *llnode;
 
        mutex_lock(&vq->mutex);
        llnode = llist_del_all(&vs->vs_event_list);
        while (llnode) {
-               evt = llist_entry(llnode, struct tcm_vhost_evt, list);
+               evt = llist_entry(llnode, struct vhost_scsi_evt, list);
                llnode = llist_next(llnode);
-               tcm_vhost_do_evt_work(vs, evt);
-               tcm_vhost_free_evt(vs, evt);
+               vhost_scsi_do_evt_work(vs, evt);
+               vhost_scsi_free_evt(vs, evt);
        }
        mutex_unlock(&vq->mutex);
 }
@@ -679,15 +682,16 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
                                        vs_completion_work);
        DECLARE_BITMAP(signal, VHOST_SCSI_MAX_VQ);
        struct virtio_scsi_cmd_resp v_rsp;
-       struct tcm_vhost_cmd *cmd;
+       struct vhost_scsi_cmd *cmd;
        struct llist_node *llnode;
        struct se_cmd *se_cmd;
+       struct iov_iter iov_iter;
        int ret, vq;
 
        bitmap_zero(signal, VHOST_SCSI_MAX_VQ);
        llnode = llist_del_all(&vs->vs_completion_list);
        while (llnode) {
-               cmd = llist_entry(llnode, struct tcm_vhost_cmd,
+               cmd = llist_entry(llnode, struct vhost_scsi_cmd,
                                     tvc_completion_list);
                llnode = llist_next(llnode);
                se_cmd = &cmd->tvc_se_cmd;
@@ -703,8 +707,11 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
                                                 se_cmd->scsi_sense_length);
                memcpy(v_rsp.sense, cmd->tvc_sense_buf,
                       se_cmd->scsi_sense_length);
-               ret = copy_to_user(cmd->tvc_resp, &v_rsp, sizeof(v_rsp));
-               if (likely(ret == 0)) {
+
+               iov_iter_init(&iov_iter, READ, cmd->tvc_resp_iov,
+                             cmd->tvc_in_iovs, sizeof(v_rsp));
+               ret = copy_to_iter(&v_rsp, sizeof(v_rsp), &iov_iter);
+               if (likely(ret == sizeof(v_rsp))) {
                        struct vhost_scsi_virtqueue *q;
                        vhost_add_used(cmd->tvc_vq, cmd->tvc_vq_desc, 0);
                        q = container_of(cmd->tvc_vq, struct vhost_scsi_virtqueue, vq);
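
The completion path above switches from copy_to_user() on a single response pointer to copy_to_iter() over the command's response iovecs, so the response may now be scattered across several segments and success is reported as bytes copied (ret == sizeof(v_rsp)) rather than bytes left uncopied (ret == 0). A simplified userspace analog of scattering a buffer across iovec segments (this is not the kernel's iov_iter; struct iovec comes from <sys/uio.h> and the sizes are illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Copy 'len' bytes of 'src' across the segments in 'iov'; return bytes copied. */
static size_t copy_to_segs(const void *src, size_t len,
			   const struct iovec *iov, int niov)
{
	const char *p = src;
	size_t done = 0;
	int i;

	for (i = 0; i < niov && done < len; i++) {
		size_t n = len - done;

		if (n > iov[i].iov_len)
			n = iov[i].iov_len;
		memcpy(iov[i].iov_base, p + done, n);
		done += n;
	}
	return done;	/* bytes copied, not bytes remaining */
}

int main(void)
{
	char a[4], b[8], rsp[10];
	struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	size_t ret;

	memset(rsp, 'x', sizeof(rsp));
	ret = copy_to_segs(rsp, sizeof(rsp), iov, 2);
	printf("copied %zu of %zu bytes\n", ret, sizeof(rsp));
	return 0;
}
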
@@ -722,13 +729,13 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work)
                vhost_signal(&vs->dev, &vs->vqs[vq].vq);
 }
 
-static struct tcm_vhost_cmd *
-vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
+static struct vhost_scsi_cmd *
+vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct vhost_scsi_tpg *tpg,
                   unsigned char *cdb, u64 scsi_tag, u16 lun, u8 task_attr,
                   u32 exp_data_len, int data_direction)
 {
-       struct tcm_vhost_cmd *cmd;
-       struct tcm_vhost_nexus *tv_nexus;
+       struct vhost_scsi_cmd *cmd;
+       struct vhost_scsi_nexus *tv_nexus;
        struct se_session *se_sess;
        struct scatterlist *sg, *prot_sg;
        struct page **pages;
@@ -736,22 +743,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
 
        tv_nexus = tpg->tpg_nexus;
        if (!tv_nexus) {
-               pr_err("Unable to locate active struct tcm_vhost_nexus\n");
+               pr_err("Unable to locate active struct vhost_scsi_nexus\n");
                return ERR_PTR(-EIO);
        }
        se_sess = tv_nexus->tvn_se_sess;
 
        tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0) {
-               pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
+               pr_err("Unable to obtain tag for vhost_scsi_cmd\n");
                return ERR_PTR(-ENOMEM);
        }
 
-       cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[tag];
+       cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[tag];
        sg = cmd->tvc_sgl;
        prot_sg = cmd->tvc_prot_sgl;
        pages = cmd->tvc_upages;
-       memset(cmd, 0, sizeof(struct tcm_vhost_cmd));
+       memset(cmd, 0, sizeof(struct vhost_scsi_cmd));
 
        cmd->tvc_sgl = sg;
        cmd->tvc_prot_sgl = prot_sg;
@@ -763,9 +770,9 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
        cmd->tvc_exp_data_len = exp_data_len;
        cmd->tvc_data_direction = data_direction;
        cmd->tvc_nexus = tv_nexus;
-       cmd->inflight = tcm_vhost_get_inflight(vq);
+       cmd->inflight = vhost_scsi_get_inflight(vq);
 
-       memcpy(cmd->tvc_cdb, cdb, TCM_VHOST_MAX_CDB_SIZE);
+       memcpy(cmd->tvc_cdb, cdb, VHOST_SCSI_MAX_CDB_SIZE);
 
        return cmd;
 }
@@ -776,29 +783,22 @@ vhost_scsi_get_tag(struct vhost_virtqueue *vq, struct tcm_vhost_tpg *tpg,
  * Returns the number of scatterlist entries used or -errno on error.
  */
 static int
-vhost_scsi_map_to_sgl(struct tcm_vhost_cmd *tv_cmd,
+vhost_scsi_map_to_sgl(struct vhost_scsi_cmd *cmd,
+                     void __user *ptr,
+                     size_t len,
                      struct scatterlist *sgl,
-                     unsigned int sgl_count,
-                     struct iovec *iov,
-                     struct page **pages,
                      bool write)
 {
-       unsigned int npages = 0, pages_nr, offset, nbytes;
+       unsigned int npages = 0, offset, nbytes;
+       unsigned int pages_nr = iov_num_pages(ptr, len);
        struct scatterlist *sg = sgl;
-       void __user *ptr = iov->iov_base;
-       size_t len = iov->iov_len;
+       struct page **pages = cmd->tvc_upages;
        int ret, i;
 
-       pages_nr = iov_num_pages(iov);
-       if (pages_nr > sgl_count) {
-               pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
-                      " sgl_count: %u\n", pages_nr, sgl_count);
-               return -ENOBUFS;
-       }
-       if (pages_nr > TCM_VHOST_PREALLOC_UPAGES) {
+       if (pages_nr > VHOST_SCSI_PREALLOC_UPAGES) {
                pr_err("vhost_scsi_map_to_sgl() pages_nr: %u greater than"
-                      " preallocated TCM_VHOST_PREALLOC_UPAGES: %u\n",
-                       pages_nr, TCM_VHOST_PREALLOC_UPAGES);
+                      " preallocated VHOST_SCSI_PREALLOC_UPAGES: %u\n",
+                       pages_nr, VHOST_SCSI_PREALLOC_UPAGES);
                return -ENOBUFS;
        }
 
@@ -829,84 +829,94 @@ out:
 }
 
 static int
-vhost_scsi_map_iov_to_sgl(struct tcm_vhost_cmd *cmd,
-                         struct iovec *iov,
-                         int niov,
-                         bool write)
+vhost_scsi_calc_sgls(struct iov_iter *iter, size_t bytes, int max_sgls)
 {
-       struct scatterlist *sg = cmd->tvc_sgl;
-       unsigned int sgl_count = 0;
-       int ret, i;
+       int sgl_count = 0;
 
-       for (i = 0; i < niov; i++)
-               sgl_count += iov_num_pages(&iov[i]);
+       if (!iter || !iter->iov) {
+               pr_err("%s: iter->iov is NULL, but expected bytes: %zu"
+                      " present\n", __func__, bytes);
+               return -EINVAL;
+       }
 
-       if (sgl_count > TCM_VHOST_PREALLOC_SGLS) {
-               pr_err("vhost_scsi_map_iov_to_sgl() sgl_count: %u greater than"
-                       " preallocated TCM_VHOST_PREALLOC_SGLS: %u\n",
-                       sgl_count, TCM_VHOST_PREALLOC_SGLS);
-               return -ENOBUFS;
+       sgl_count = iov_iter_npages(iter, 0xffff);
+       if (sgl_count > max_sgls) {
+               pr_err("%s: requested sgl_count: %d exceeds pre-allocated"
+                      " max_sgls: %d\n", __func__, sgl_count, max_sgls);
+               return -EINVAL;
        }
+       return sgl_count;
+}
 
-       pr_debug("%s sg %p sgl_count %u\n", __func__, sg, sgl_count);
-       sg_init_table(sg, sgl_count);
-       cmd->tvc_sgl_count = sgl_count;
+static int
+vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
+                     struct iov_iter *iter,
+                     struct scatterlist *sg, int sg_count)
+{
+       size_t off = iter->iov_offset;
+       int i, ret;
 
-       pr_debug("Mapping iovec %p for %u pages\n", &iov[0], sgl_count);
+       for (i = 0; i < iter->nr_segs; i++) {
+               void __user *base = iter->iov[i].iov_base + off;
+               size_t len = iter->iov[i].iov_len - off;
 
-       for (i = 0; i < niov; i++) {
-               ret = vhost_scsi_map_to_sgl(cmd, sg, sgl_count, &iov[i],
-                                           cmd->tvc_upages, write);
+               ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
                if (ret < 0) {
-                       for (i = 0; i < cmd->tvc_sgl_count; i++)
-                               put_page(sg_page(&cmd->tvc_sgl[i]));
-
-                       cmd->tvc_sgl_count = 0;
+                       for (i = 0; i < sg_count; i++) {
+                               struct page *page = sg_page(&sg[i]);
+                               if (page)
+                                       put_page(page);
+                       }
                        return ret;
                }
                sg += ret;
-               sgl_count -= ret;
+               off = 0;
        }
        return 0;
 }
 
 static int
-vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
-                          struct iovec *iov,
-                          int niov,
-                          bool write)
-{
-       struct scatterlist *prot_sg = cmd->tvc_prot_sgl;
-       unsigned int prot_sgl_count = 0;
-       int ret, i;
-
-       for (i = 0; i < niov; i++)
-               prot_sgl_count += iov_num_pages(&iov[i]);
-
-       if (prot_sgl_count > TCM_VHOST_PREALLOC_PROT_SGLS) {
-               pr_err("vhost_scsi_map_iov_to_prot() sgl_count: %u greater than"
-                       " preallocated TCM_VHOST_PREALLOC_PROT_SGLS: %u\n",
-                       prot_sgl_count, TCM_VHOST_PREALLOC_PROT_SGLS);
-               return -ENOBUFS;
-       }
-
-       pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
-                prot_sg, prot_sgl_count);
-       sg_init_table(prot_sg, prot_sgl_count);
-       cmd->tvc_prot_sgl_count = prot_sgl_count;
-
-       for (i = 0; i < niov; i++) {
-               ret = vhost_scsi_map_to_sgl(cmd, prot_sg, prot_sgl_count, &iov[i],
-                                           cmd->tvc_upages, write);
+vhost_scsi_mapal(struct vhost_scsi_cmd *cmd,
+                size_t prot_bytes, struct iov_iter *prot_iter,
+                size_t data_bytes, struct iov_iter *data_iter)
+{
+       int sgl_count, ret;
+       bool write = (cmd->tvc_data_direction == DMA_FROM_DEVICE);
+
+       if (prot_bytes) {
+               sgl_count = vhost_scsi_calc_sgls(prot_iter, prot_bytes,
+                                                VHOST_SCSI_PREALLOC_PROT_SGLS);
+               if (sgl_count < 0)
+                       return sgl_count;
+
+               sg_init_table(cmd->tvc_prot_sgl, sgl_count);
+               cmd->tvc_prot_sgl_count = sgl_count;
+               pr_debug("%s prot_sg %p prot_sgl_count %u\n", __func__,
+                        cmd->tvc_prot_sgl, cmd->tvc_prot_sgl_count);
+
+               ret = vhost_scsi_iov_to_sgl(cmd, write, prot_iter,
+                                           cmd->tvc_prot_sgl,
+                                           cmd->tvc_prot_sgl_count);
                if (ret < 0) {
-                       for (i = 0; i < cmd->tvc_prot_sgl_count; i++)
-                               put_page(sg_page(&cmd->tvc_prot_sgl[i]));
-
                        cmd->tvc_prot_sgl_count = 0;
                        return ret;
                }
-               prot_sg += ret;
-               prot_sgl_count -= ret;
+       }
+       sgl_count = vhost_scsi_calc_sgls(data_iter, data_bytes,
+                                        VHOST_SCSI_PREALLOC_SGLS);
+       if (sgl_count < 0)
+               return sgl_count;
+
+       sg_init_table(cmd->tvc_sgl, sgl_count);
+       cmd->tvc_sgl_count = sgl_count;
+       pr_debug("%s data_sg %p data_sgl_count %u\n", __func__,
+                 cmd->tvc_sgl, cmd->tvc_sgl_count);
+
+       ret = vhost_scsi_iov_to_sgl(cmd, write, data_iter,
+                                   cmd->tvc_sgl, cmd->tvc_sgl_count);
+       if (ret < 0) {
+               cmd->tvc_sgl_count = 0;
+               return ret;
        }
        return 0;
 }
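
In vhost_scsi_iov_to_sgl() above, the iterator's iov_offset is applied only to the first segment and then zeroed, so a request header that was already consumed from the front of segment 0 does not shift the later segments. A standalone sketch of that walk (plain C; the 51-byte header offset is only an example, and struct iovec comes from <sys/uio.h>):

#include <stdio.h>
#include <sys/uio.h>

/* Visit each segment, skipping 'off' bytes of the first segment only. */
static void walk_from_offset(const struct iovec *iov, int niov, size_t off)
{
	int i;

	for (i = 0; i < niov; i++) {
		const char *base = (const char *)iov[i].iov_base + off;
		size_t len = iov[i].iov_len - off;

		printf("segment %d: skip %zu, map %zu bytes at %p\n",
		       i, off, len, (void *)base);
		off = 0;	/* the offset applies to the first segment only */
	}
}

int main(void)
{
	char seg0[64], seg1[32];
	struct iovec iov[2] = {
		{ seg0, sizeof(seg0) },
		{ seg1, sizeof(seg1) },
	};

	/* e.g. a 51-byte request header was already consumed from segment 0 */
	walk_from_offset(iov, 2, 51);
	return 0;
}
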
@@ -928,11 +938,11 @@ static int vhost_scsi_to_tcm_attr(int attr)
        return TCM_SIMPLE_TAG;
 }
 
-static void tcm_vhost_submission_work(struct work_struct *work)
+static void vhost_scsi_submission_work(struct work_struct *work)
 {
-       struct tcm_vhost_cmd *cmd =
-               container_of(work, struct tcm_vhost_cmd, work);
-       struct tcm_vhost_nexus *tv_nexus;
+       struct vhost_scsi_cmd *cmd =
+               container_of(work, struct vhost_scsi_cmd, work);
+       struct vhost_scsi_nexus *tv_nexus;
        struct se_cmd *se_cmd = &cmd->tvc_se_cmd;
        struct scatterlist *sg_ptr, *sg_prot_ptr = NULL;
        int rc;
@@ -986,19 +996,20 @@ vhost_scsi_send_bad_target(struct vhost_scsi *vs,
 static void
 vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 {
-       struct tcm_vhost_tpg **vs_tpg;
+       struct vhost_scsi_tpg **vs_tpg, *tpg;
        struct virtio_scsi_cmd_req v_req;
        struct virtio_scsi_cmd_req_pi v_req_pi;
-       struct tcm_vhost_tpg *tpg;
-       struct tcm_vhost_cmd *cmd;
+       struct vhost_scsi_cmd *cmd;
+       struct iov_iter out_iter, in_iter, prot_iter, data_iter;
        u64 tag;
-       u32 exp_data_len, data_first, data_num, data_direction, prot_first;
-       unsigned out, in, i;
-       int head, ret, data_niov, prot_niov, prot_bytes;
-       size_t req_size;
+       u32 exp_data_len, data_direction;
+       unsigned out, in;
+       int head, ret, prot_bytes;
+       size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
+       size_t out_size, in_size;
        u16 lun;
        u8 *target, *lunp, task_attr;
-       bool hdr_pi;
+       bool t10_pi = vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI);
        void *req, *cdb;
 
        mutex_lock(&vq->mutex);
@@ -1014,10 +1025,10 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
 
        for (;;) {
                head = vhost_get_vq_desc(vq, vq->iov,
-                                       ARRAY_SIZE(vq->iov), &out, &in,
-                                       NULL, NULL);
+                                        ARRAY_SIZE(vq->iov), &out, &in,
+                                        NULL, NULL);
                pr_debug("vhost_get_vq_desc: head: %d, out: %u in: %u\n",
-                                       head, out, in);
+                        head, out, in);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
@@ -1029,113 +1040,134 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                        }
                        break;
                }
-
-               /* FIXME: BIDI operation */
-               if (out == 1 && in == 1) {
-                       data_direction = DMA_NONE;
-                       data_first = 0;
-                       data_num = 0;
-               } else if (out == 1 && in > 1) {
-                       data_direction = DMA_FROM_DEVICE;
-                       data_first = out + 1;
-                       data_num = in - 1;
-               } else if (out > 1 && in == 1) {
-                       data_direction = DMA_TO_DEVICE;
-                       data_first = 1;
-                       data_num = out - 1;
-               } else {
-                       vq_err(vq, "Invalid buffer layout out: %u in: %u\n",
-                                       out, in);
-                       break;
-               }
-
                /*
-                * Check for a sane resp buffer so we can report errors to
-                * the guest.
+                * Check for a sane response buffer so we can report early
+                * errors back to the guest.
                 */
-               if (unlikely(vq->iov[out].iov_len !=
-                                       sizeof(struct virtio_scsi_cmd_resp))) {
-                       vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
-                               " bytes\n", vq->iov[out].iov_len);
+               if (unlikely(vq->iov[out].iov_len < rsp_size)) {
+                       vq_err(vq, "Expecting at least virtio_scsi_cmd_resp"
+                               " size, got %zu bytes\n", vq->iov[out].iov_len);
                        break;
                }
-
-               if (vhost_has_feature(vq, VIRTIO_SCSI_F_T10_PI)) {
+               /*
+                * Set up pointers and values based upon the different virtio-scsi
+                * request header used when T10_PI is enabled in the KVM guest.
+                */
+               if (t10_pi) {
                        req = &v_req_pi;
+                       req_size = sizeof(v_req_pi);
                        lunp = &v_req_pi.lun[0];
                        target = &v_req_pi.lun[1];
-                       req_size = sizeof(v_req_pi);
-                       hdr_pi = true;
                } else {
                        req = &v_req;
+                       req_size = sizeof(v_req);
                        lunp = &v_req.lun[0];
                        target = &v_req.lun[1];
-                       req_size = sizeof(v_req);
-                       hdr_pi = false;
                }
+               /*
+                * FIXME: Not correct for BIDI operation
+                */
+               out_size = iov_length(vq->iov, out);
+               in_size = iov_length(&vq->iov[out], in);
 
-               if (unlikely(vq->iov[0].iov_len < req_size)) {
-                       pr_err("Expecting virtio-scsi header: %zu, got %zu\n",
-                              req_size, vq->iov[0].iov_len);
-                       break;
-               }
-               ret = copy_from_user(req, vq->iov[0].iov_base, req_size);
-               if (unlikely(ret)) {
-                       vq_err(vq, "Faulted on virtio_scsi_cmd_req\n");
-                       break;
-               }
+               /*
+                * Copy over the virtio-scsi request header, which for an
+                * ANY_LAYOUT enabled guest may span multiple iovecs, or a
+                * single iovec may contain both the header + outgoing
+                * WRITE payloads.
+                *
+                * copy_from_iter() will advance out_iter, so that it will
+                * point at the start of the outgoing WRITE payload, if
+                * DMA_TO_DEVICE is set.
+                */
+               iov_iter_init(&out_iter, WRITE, vq->iov, out, out_size);
 
+               ret = copy_from_iter(req, req_size, &out_iter);
+               if (unlikely(ret != req_size)) {
+                       vq_err(vq, "Faulted on copy_from_iter\n");
+                       vhost_scsi_send_bad_target(vs, vq, head, out);
+                       continue;
+               }
                /* virtio-scsi spec requires byte 0 of the lun to be 1 */
                if (unlikely(*lunp != 1)) {
+                       vq_err(vq, "Illegal virtio-scsi lun: %u\n", *lunp);
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
 
                tpg = ACCESS_ONCE(vs_tpg[*target]);
-
-               /* Target does not exist, fail the request */
                if (unlikely(!tpg)) {
+                       /* Target does not exist, fail the request */
                        vhost_scsi_send_bad_target(vs, vq, head, out);
                        continue;
                }
-
-               data_niov = data_num;
-               prot_niov = prot_first = prot_bytes = 0;
                /*
-                * Determine if any protection information iovecs are preceeding
-                * the actual data payload, and adjust data_first + data_niov
-                * values accordingly for vhost_scsi_map_iov_to_sgl() below.
+                * Determine data_direction by comparing the total outgoing
+                * iovec size and the total incoming iovec size against the
+                * virtio-scsi request and response header sizes, respectively.
                 *
-                * Also extract virtio_scsi header bits for vhost_scsi_get_tag()
+                * For DMA_TO_DEVICE this is out_iter, which is already pointing
+                * to the right place.
+                *
+                * For DMA_FROM_DEVICE, the data payload begins just past the
+                * end of the virtio-scsi response header, in either the same
+                * or the immediately following iovec.
+                *
+                * Any associated T10_PI bytes for the outgoing / incoming
+                * payloads are included in the calculation of exp_data_len here.
                 */
-               if (hdr_pi) {
+               prot_bytes = 0;
+
+               if (out_size > req_size) {
+                       data_direction = DMA_TO_DEVICE;
+                       exp_data_len = out_size - req_size;
+                       data_iter = out_iter;
+               } else if (in_size > rsp_size) {
+                       data_direction = DMA_FROM_DEVICE;
+                       exp_data_len = in_size - rsp_size;
+
+                       iov_iter_init(&in_iter, READ, &vq->iov[out], in,
+                                     rsp_size + exp_data_len);
+                       iov_iter_advance(&in_iter, rsp_size);
+                       data_iter = in_iter;
+               } else {
+                       data_direction = DMA_NONE;
+                       exp_data_len = 0;
+               }
+               /*
+                        * If T10_PI header + payload is present, set up prot_iter values
+                * and recalculate data_iter for vhost_scsi_mapal() mapping to
+                * host scatterlists via get_user_pages_fast().
+                */
+               if (t10_pi) {
                        if (v_req_pi.pi_bytesout) {
                                if (data_direction != DMA_TO_DEVICE) {
-                                       vq_err(vq, "Received non zero do_pi_niov"
-                                               ", but wrong data_direction\n");
-                                       goto err_cmd;
+                                       vq_err(vq, "Received non zero pi_bytesout,"
+                                               " but wrong data_direction\n");
+                                       vhost_scsi_send_bad_target(vs, vq, head, out);
+                                       continue;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesout);
                        } else if (v_req_pi.pi_bytesin) {
                                if (data_direction != DMA_FROM_DEVICE) {
-                                       vq_err(vq, "Received non zero di_pi_niov"
-                                               ", but wrong data_direction\n");
-                                       goto err_cmd;
+                                       vq_err(vq, "Received non zero pi_bytesin,"
+                                               " but wrong data_direction\n");
+                                       vhost_scsi_send_bad_target(vs, vq, head, out);
+                                       continue;
                                }
                                prot_bytes = vhost32_to_cpu(vq, v_req_pi.pi_bytesin);
                        }
+                       /*
+                        * Set prot_iter to data_iter, and advance past any
+                        * preceding prot_bytes that may be present.
+                        *
+                        * Also fix up the exp_data_len to reflect only the
+                        * actual data payload length.
+                        */
                        if (prot_bytes) {
-                               int tmp = 0;
-
-                               for (i = 0; i < data_num; i++) {
-                                       tmp += vq->iov[data_first + i].iov_len;
-                                       prot_niov++;
-                                       if (tmp >= prot_bytes)
-                                               break;
-                               }
-                               prot_first = data_first;
-                               data_first += prot_niov;
-                               data_niov = data_num - prot_niov;
+                               exp_data_len -= prot_bytes;
+                               prot_iter = data_iter;
+                               iov_iter_advance(&data_iter, prot_bytes);
                        }
                        tag = vhost64_to_cpu(vq, v_req_pi.tag);
                        task_attr = v_req_pi.task_attr;
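
Once copy_from_iter() has advanced out_iter past the request header, the direction logic above reduces to size comparisons: any outgoing bytes beyond the request header are WRITE payload, any incoming bytes beyond the response header are READ payload, and otherwise there is no data phase. A standalone sketch of that arithmetic (plain C; the 51-byte request and 108-byte response header sizes are only illustrative):

#include <stdio.h>
#include <stddef.h>

enum dir { DIR_NONE, DIR_TO_DEVICE, DIR_FROM_DEVICE };

static enum dir classify(size_t out_size, size_t req_size,
			 size_t in_size, size_t rsp_size,
			 size_t *exp_data_len)
{
	if (out_size > req_size) {	/* WRITE payload follows the request header */
		*exp_data_len = out_size - req_size;
		return DIR_TO_DEVICE;
	}
	if (in_size > rsp_size) {	/* READ payload follows the response header */
		*exp_data_len = in_size - rsp_size;
		return DIR_FROM_DEVICE;
	}
	*exp_data_len = 0;		/* no data phase */
	return DIR_NONE;
}

int main(void)
{
	size_t len;
	enum dir d;

	d = classify(51 + 4096, 51, 108, 108, &len);	/* 4 KiB WRITE */
	printf("dir=%d exp_data_len=%zu\n", d, len);

	d = classify(51, 51, 108 + 512, 108, &len);	/* 512-byte READ */
	printf("dir=%d exp_data_len=%zu\n", d, len);
	return 0;
}
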
@@ -1147,83 +1179,65 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
                        cdb = &v_req.cdb[0];
                        lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
                }
-               exp_data_len = 0;
-               for (i = 0; i < data_niov; i++)
-                       exp_data_len += vq->iov[data_first + i].iov_len;
                /*
-                * Check that the recieved CDB size does not exceeded our
-                * hardcoded max for vhost-scsi
+                * Check that the received CDB size does not exceed our
+                * hardcoded max for vhost-scsi, then get a pre-allocated
+                * cmd descriptor for the new virtio-scsi tag.
                 *
                 * TODO what if cdb was too small for varlen cdb header?
                 */
-               if (unlikely(scsi_command_size(cdb) > TCM_VHOST_MAX_CDB_SIZE)) {
+               if (unlikely(scsi_command_size(cdb) > VHOST_SCSI_MAX_CDB_SIZE)) {
                        vq_err(vq, "Received SCSI CDB with command_size: %d that"
                                " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
-                               scsi_command_size(cdb), TCM_VHOST_MAX_CDB_SIZE);
-                       goto err_cmd;
+                               scsi_command_size(cdb), VHOST_SCSI_MAX_CDB_SIZE);
+                       vhost_scsi_send_bad_target(vs, vq, head, out);
+                       continue;
                }
-
                cmd = vhost_scsi_get_tag(vq, tpg, cdb, tag, lun, task_attr,
                                         exp_data_len + prot_bytes,
                                         data_direction);
                if (IS_ERR(cmd)) {
                        vq_err(vq, "vhost_scsi_get_tag failed %ld\n",
-                                       PTR_ERR(cmd));
-                       goto err_cmd;
+                              PTR_ERR(cmd));
+                       vhost_scsi_send_bad_target(vs, vq, head, out);
+                       continue;
                }
-
-               pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
-                       ": %d\n", cmd, exp_data_len, data_direction);
-
                cmd->tvc_vhost = vs;
                cmd->tvc_vq = vq;
-               cmd->tvc_resp = vq->iov[out].iov_base;
+               cmd->tvc_resp_iov = &vq->iov[out];
+               cmd->tvc_in_iovs = in;
 
                pr_debug("vhost_scsi got command opcode: %#02x, lun: %d\n",
-                       cmd->tvc_cdb[0], cmd->tvc_lun);
+                        cmd->tvc_cdb[0], cmd->tvc_lun);
+               pr_debug("cmd: %p exp_data_len: %d, prot_bytes: %d data_direction:"
+                        " %d\n", cmd, exp_data_len, prot_bytes, data_direction);
 
-               if (prot_niov) {
-                       ret = vhost_scsi_map_iov_to_prot(cmd,
-                                       &vq->iov[prot_first], prot_niov,
-                                       data_direction == DMA_FROM_DEVICE);
-                       if (unlikely(ret)) {
-                               vq_err(vq, "Failed to map iov to"
-                                       " prot_sgl\n");
-                               goto err_free;
-                       }
-               }
                if (data_direction != DMA_NONE) {
-                       ret = vhost_scsi_map_iov_to_sgl(cmd,
-                                       &vq->iov[data_first], data_niov,
-                                       data_direction == DMA_FROM_DEVICE);
+                       ret = vhost_scsi_mapal(cmd,
+                                              prot_bytes, &prot_iter,
+                                              exp_data_len, &data_iter);
                        if (unlikely(ret)) {
                                vq_err(vq, "Failed to map iov to sgl\n");
-                               goto err_free;
+                               vhost_scsi_release_cmd(&cmd->tvc_se_cmd);
+                               vhost_scsi_send_bad_target(vs, vq, head, out);
+                               continue;
                        }
                }
                /*
                 * Save the descriptor from vhost_get_vq_desc() to be used to
                 * complete the virtio-scsi request in TCM callback context via
-                * tcm_vhost_queue_data_in() and tcm_vhost_queue_status()
+                * vhost_scsi_queue_data_in() and vhost_scsi_queue_status()
                 */
                cmd->tvc_vq_desc = head;
                /*
-                * Dispatch tv_cmd descriptor for cmwq execution in process
-                * context provided by tcm_vhost_workqueue.  This also ensures
-                * tv_cmd is executed on the same kworker CPU as this vhost
-                * thread to gain positive L2 cache locality effects..
+                * Dispatch cmd descriptor for cmwq execution in process
+                * context provided by vhost_scsi_workqueue.  This also ensures
+                * cmd is executed on the same kworker CPU as this vhost
+                * thread to gain positive L2 cache locality effects.
                 */
-               INIT_WORK(&cmd->work, tcm_vhost_submission_work);
-               queue_work(tcm_vhost_workqueue, &cmd->work);
+               INIT_WORK(&cmd->work, vhost_scsi_submission_work);
+               queue_work(vhost_scsi_workqueue, &cmd->work);
        }
-
-       mutex_unlock(&vq->mutex);
-       return;
-
-err_free:
-       vhost_scsi_free_cmd(cmd);
-err_cmd:
-       vhost_scsi_send_bad_target(vs, vq, head, out);
 out:
        mutex_unlock(&vq->mutex);
 }
@@ -1234,15 +1248,15 @@ static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
 }
 
 static void
-tcm_vhost_send_evt(struct vhost_scsi *vs,
-                  struct tcm_vhost_tpg *tpg,
+vhost_scsi_send_evt(struct vhost_scsi *vs,
+                  struct vhost_scsi_tpg *tpg,
                   struct se_lun *lun,
                   u32 event,
                   u32 reason)
 {
-       struct tcm_vhost_evt *evt;
+       struct vhost_scsi_evt *evt;
 
-       evt = tcm_vhost_allocate_evt(vs, event, reason);
+       evt = vhost_scsi_allocate_evt(vs, event, reason);
        if (!evt)
                return;
 
@@ -1253,7 +1267,7 @@ tcm_vhost_send_evt(struct vhost_scsi *vs,
                 * lun[4-7] need to be zero according to virtio-scsi spec.
                 */
                evt->event.lun[0] = 0x01;
-               evt->event.lun[1] = tpg->tport_tpgt & 0xFF;
+               evt->event.lun[1] = tpg->tport_tpgt;
                if (lun->unpacked_lun >= 256)
                        evt->event.lun[2] = lun->unpacked_lun >> 8 | 0x40;
                evt->event.lun[3] = lun->unpacked_lun & 0xFF;
@@ -1274,7 +1288,7 @@ static void vhost_scsi_evt_handle_kick(struct vhost_work *work)
                goto out;
 
        if (vs->vs_events_missed)
-               tcm_vhost_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
+               vhost_scsi_send_evt(vs, NULL, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
 out:
        mutex_unlock(&vq->mutex);
 }
@@ -1300,7 +1314,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
        int i;
 
        /* Init new inflight and remember the old inflight */
-       tcm_vhost_init_inflight(vs, old_inflight);
+       vhost_scsi_init_inflight(vs, old_inflight);
 
        /*
         * The inflight->kref was initialized to 1. We decrement it here to
@@ -1308,7 +1322,7 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
         * when all the reqs are finished.
         */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
-               kref_put(&old_inflight[i]->kref, tcm_vhost_done_inflight);
+               kref_put(&old_inflight[i]->kref, vhost_scsi_done_inflight);
 
        /* Flush both the vhost poll and vhost work */
        for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
@@ -1323,24 +1337,24 @@ static void vhost_scsi_flush(struct vhost_scsi *vs)
 
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
- * tcm_vhost_tpg with an active struct tcm_vhost_nexus
+ * vhost_scsi_tpg with an active struct vhost_scsi_nexus
  *
  *  The lock nesting rule is:
- *    tcm_vhost_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
+ *    vhost_scsi_mutex -> vs->dev.mutex -> tpg->tv_tpg_mutex -> vq->mutex
  */
 static int
 vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                        struct vhost_scsi_target *t)
 {
        struct se_portal_group *se_tpg;
-       struct tcm_vhost_tport *tv_tport;
-       struct tcm_vhost_tpg *tpg;
-       struct tcm_vhost_tpg **vs_tpg;
+       struct vhost_scsi_tport *tv_tport;
+       struct vhost_scsi_tpg *tpg;
+       struct vhost_scsi_tpg **vs_tpg;
        struct vhost_virtqueue *vq;
        int index, ret, i, len;
        bool match = false;
 
-       mutex_lock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
        mutex_lock(&vs->dev.mutex);
 
        /* Verify that ring has been setup correctly. */
@@ -1361,7 +1375,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
        if (vs->vs_tpg)
                memcpy(vs_tpg, vs->vs_tpg, len);
 
-       list_for_each_entry(tpg, &tcm_vhost_list, tv_tpg_list) {
+       list_for_each_entry(tpg, &vhost_scsi_list, tv_tpg_list) {
                mutex_lock(&tpg->tv_tpg_mutex);
                if (!tpg->tpg_nexus) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1429,7 +1443,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
 
 out:
        mutex_unlock(&vs->dev.mutex);
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
        return ret;
 }
 
@@ -1438,14 +1452,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
                          struct vhost_scsi_target *t)
 {
        struct se_portal_group *se_tpg;
-       struct tcm_vhost_tport *tv_tport;
-       struct tcm_vhost_tpg *tpg;
+       struct vhost_scsi_tport *tv_tport;
+       struct vhost_scsi_tpg *tpg;
        struct vhost_virtqueue *vq;
        bool match = false;
        int index, ret, i;
        u8 target;
 
-       mutex_lock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
        mutex_lock(&vs->dev.mutex);
        /* Verify that ring has been setup correctly. */
        for (index = 0; index < vs->dev.nvqs; ++index) {
@@ -1511,14 +1525,14 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
        vs->vs_tpg = NULL;
        WARN_ON(vs->vs_events_nr);
        mutex_unlock(&vs->dev.mutex);
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
        return 0;
 
 err_tpg:
        mutex_unlock(&tpg->tv_tpg_mutex);
 err_dev:
        mutex_unlock(&vs->dev.mutex);
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
        return ret;
 }
 
@@ -1565,7 +1579,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
                goto err_vqs;
 
        vhost_work_init(&vs->vs_completion_work, vhost_scsi_complete_cmd_work);
-       vhost_work_init(&vs->vs_event_work, tcm_vhost_evt_work);
+       vhost_work_init(&vs->vs_event_work, vhost_scsi_evt_work);
 
        vs->vs_events_nr = 0;
        vs->vs_events_missed = false;
@@ -1580,7 +1594,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
        }
        vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
 
-       tcm_vhost_init_inflight(vs, NULL);
+       vhost_scsi_init_inflight(vs, NULL);
 
        f->private_data = vs;
        return 0;
@@ -1712,7 +1726,7 @@ static int vhost_scsi_deregister(void)
        return misc_deregister(&vhost_scsi_misc);
 }
 
-static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
+static char *vhost_scsi_dump_proto_id(struct vhost_scsi_tport *tport)
 {
        switch (tport->tport_proto_id) {
        case SCSI_PROTOCOL_SAS:
@@ -1729,7 +1743,7 @@ static char *tcm_vhost_dump_proto_id(struct tcm_vhost_tport *tport)
 }
 
 static void
-tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
+vhost_scsi_do_plug(struct vhost_scsi_tpg *tpg,
                  struct se_lun *lun, bool plug)
 {
 
@@ -1750,71 +1764,71 @@ tcm_vhost_do_plug(struct tcm_vhost_tpg *tpg,
        vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq;
        mutex_lock(&vq->mutex);
        if (vhost_has_feature(vq, VIRTIO_SCSI_F_HOTPLUG))
-               tcm_vhost_send_evt(vs, tpg, lun,
+               vhost_scsi_send_evt(vs, tpg, lun,
                                   VIRTIO_SCSI_T_TRANSPORT_RESET, reason);
        mutex_unlock(&vq->mutex);
        mutex_unlock(&vs->dev.mutex);
 }
 
-static void tcm_vhost_hotplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+static void vhost_scsi_hotplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
 {
-       tcm_vhost_do_plug(tpg, lun, true);
+       vhost_scsi_do_plug(tpg, lun, true);
 }
 
-static void tcm_vhost_hotunplug(struct tcm_vhost_tpg *tpg, struct se_lun *lun)
+static void vhost_scsi_hotunplug(struct vhost_scsi_tpg *tpg, struct se_lun *lun)
 {
-       tcm_vhost_do_plug(tpg, lun, false);
+       vhost_scsi_do_plug(tpg, lun, false);
 }
 
-static int tcm_vhost_port_link(struct se_portal_group *se_tpg,
+static int vhost_scsi_port_link(struct se_portal_group *se_tpg,
                               struct se_lun *lun)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
 
-       mutex_lock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
 
        mutex_lock(&tpg->tv_tpg_mutex);
        tpg->tv_tpg_port_count++;
        mutex_unlock(&tpg->tv_tpg_mutex);
 
-       tcm_vhost_hotplug(tpg, lun);
+       vhost_scsi_hotplug(tpg, lun);
 
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
 
        return 0;
 }
 
-static void tcm_vhost_port_unlink(struct se_portal_group *se_tpg,
+static void vhost_scsi_port_unlink(struct se_portal_group *se_tpg,
                                  struct se_lun *lun)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
 
-       mutex_lock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
 
        mutex_lock(&tpg->tv_tpg_mutex);
        tpg->tv_tpg_port_count--;
        mutex_unlock(&tpg->tv_tpg_mutex);
 
-       tcm_vhost_hotunplug(tpg, lun);
+       vhost_scsi_hotunplug(tpg, lun);
 
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
 }
 
 static struct se_node_acl *
-tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
+vhost_scsi_make_nodeacl(struct se_portal_group *se_tpg,
                       struct config_group *group,
                       const char *name)
 {
        struct se_node_acl *se_nacl, *se_nacl_new;
-       struct tcm_vhost_nacl *nacl;
+       struct vhost_scsi_nacl *nacl;
        u64 wwpn = 0;
        u32 nexus_depth;
 
-       /* tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+       /* vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
                return ERR_PTR(-EINVAL); */
-       se_nacl_new = tcm_vhost_alloc_fabric_acl(se_tpg);
+       se_nacl_new = vhost_scsi_alloc_fabric_acl(se_tpg);
        if (!se_nacl_new)
                return ERR_PTR(-ENOMEM);
 
@@ -1826,37 +1840,37 @@ tcm_vhost_make_nodeacl(struct se_portal_group *se_tpg,
        se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
                                name, nexus_depth);
        if (IS_ERR(se_nacl)) {
-               tcm_vhost_release_fabric_acl(se_tpg, se_nacl_new);
+               vhost_scsi_release_fabric_acl(se_tpg, se_nacl_new);
                return se_nacl;
        }
        /*
-        * Locate our struct tcm_vhost_nacl and set the FC Nport WWPN
+        * Locate our struct vhost_scsi_nacl and set the FC Nport WWPN
         */
-       nacl = container_of(se_nacl, struct tcm_vhost_nacl, se_node_acl);
+       nacl = container_of(se_nacl, struct vhost_scsi_nacl, se_node_acl);
        nacl->iport_wwpn = wwpn;
 
        return se_nacl;
 }
 
-static void tcm_vhost_drop_nodeacl(struct se_node_acl *se_acl)
+static void vhost_scsi_drop_nodeacl(struct se_node_acl *se_acl)
 {
-       struct tcm_vhost_nacl *nacl = container_of(se_acl,
-                               struct tcm_vhost_nacl, se_node_acl);
+       struct vhost_scsi_nacl *nacl = container_of(se_acl,
+                               struct vhost_scsi_nacl, se_node_acl);
        core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);
        kfree(nacl);
 }
 
-static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
+static void vhost_scsi_free_cmd_map_res(struct vhost_scsi_nexus *nexus,
                                       struct se_session *se_sess)
 {
-       struct tcm_vhost_cmd *tv_cmd;
+       struct vhost_scsi_cmd *tv_cmd;
        unsigned int i;
 
        if (!se_sess->sess_cmd_map)
                return;
 
-       for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
-               tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
+       for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
+               tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
 
                kfree(tv_cmd->tvc_sgl);
                kfree(tv_cmd->tvc_prot_sgl);
@@ -1864,13 +1878,13 @@ static void tcm_vhost_free_cmd_map_res(struct tcm_vhost_nexus *nexus,
        }
 }
 
-static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
+static int vhost_scsi_make_nexus(struct vhost_scsi_tpg *tpg,
                                const char *name)
 {
        struct se_portal_group *se_tpg;
        struct se_session *se_sess;
-       struct tcm_vhost_nexus *tv_nexus;
-       struct tcm_vhost_cmd *tv_cmd;
+       struct vhost_scsi_nexus *tv_nexus;
+       struct vhost_scsi_cmd *tv_cmd;
        unsigned int i;
 
        mutex_lock(&tpg->tv_tpg_mutex);
@@ -1881,19 +1895,19 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
        }
        se_tpg = &tpg->se_tpg;
 
-       tv_nexus = kzalloc(sizeof(struct tcm_vhost_nexus), GFP_KERNEL);
+       tv_nexus = kzalloc(sizeof(struct vhost_scsi_nexus), GFP_KERNEL);
        if (!tv_nexus) {
                mutex_unlock(&tpg->tv_tpg_mutex);
-               pr_err("Unable to allocate struct tcm_vhost_nexus\n");
+               pr_err("Unable to allocate struct vhost_scsi_nexus\n");
                return -ENOMEM;
        }
        /*
         *  Initialize the struct se_session pointer and setup tagpool
-        *  for struct tcm_vhost_cmd descriptors
+        *  for struct vhost_scsi_cmd descriptors
         */
        tv_nexus->tvn_se_sess = transport_init_session_tags(
-                                       TCM_VHOST_DEFAULT_TAGS,
-                                       sizeof(struct tcm_vhost_cmd),
+                                       VHOST_SCSI_DEFAULT_TAGS,
+                                       sizeof(struct vhost_scsi_cmd),
                                        TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS);
        if (IS_ERR(tv_nexus->tvn_se_sess)) {
                mutex_unlock(&tpg->tv_tpg_mutex);
@@ -1901,11 +1915,11 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
                return -ENOMEM;
        }
        se_sess = tv_nexus->tvn_se_sess;
-       for (i = 0; i < TCM_VHOST_DEFAULT_TAGS; i++) {
-               tv_cmd = &((struct tcm_vhost_cmd *)se_sess->sess_cmd_map)[i];
+       for (i = 0; i < VHOST_SCSI_DEFAULT_TAGS; i++) {
+               tv_cmd = &((struct vhost_scsi_cmd *)se_sess->sess_cmd_map)[i];
 
                tv_cmd->tvc_sgl = kzalloc(sizeof(struct scatterlist) *
-                                       TCM_VHOST_PREALLOC_SGLS, GFP_KERNEL);
+                                       VHOST_SCSI_PREALLOC_SGLS, GFP_KERNEL);
                if (!tv_cmd->tvc_sgl) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        pr_err("Unable to allocate tv_cmd->tvc_sgl\n");
@@ -1913,7 +1927,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
                }
 
                tv_cmd->tvc_upages = kzalloc(sizeof(struct page *) *
-                                       TCM_VHOST_PREALLOC_UPAGES, GFP_KERNEL);
+                                       VHOST_SCSI_PREALLOC_UPAGES, GFP_KERNEL);
                if (!tv_cmd->tvc_upages) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        pr_err("Unable to allocate tv_cmd->tvc_upages\n");
@@ -1921,7 +1935,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
                }
 
                tv_cmd->tvc_prot_sgl = kzalloc(sizeof(struct scatterlist) *
-                                       TCM_VHOST_PREALLOC_PROT_SGLS, GFP_KERNEL);
+                                       VHOST_SCSI_PREALLOC_PROT_SGLS, GFP_KERNEL);
                if (!tv_cmd->tvc_prot_sgl) {
                        mutex_unlock(&tpg->tv_tpg_mutex);
                        pr_err("Unable to allocate tv_cmd->tvc_prot_sgl\n");
@@ -1930,7 +1944,7 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
        }
        /*
         * Since we are running in 'demo mode' this call with generate a
-        * struct se_node_acl for the tcm_vhost struct se_portal_group with
+        * struct se_node_acl for the vhost_scsi struct se_portal_group with
         * the SCSI Initiator port name of the passed configfs group 'name'.
         */
        tv_nexus->tvn_se_sess->se_node_acl = core_tpg_check_initiator_node_acl(
@@ -1953,16 +1967,16 @@ static int tcm_vhost_make_nexus(struct tcm_vhost_tpg *tpg,
        return 0;
 
 out:
-       tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
+       vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
        transport_free_session(se_sess);
        kfree(tv_nexus);
        return -ENOMEM;
 }
 
-static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
+static int vhost_scsi_drop_nexus(struct vhost_scsi_tpg *tpg)
 {
        struct se_session *se_sess;
-       struct tcm_vhost_nexus *tv_nexus;
+       struct vhost_scsi_nexus *tv_nexus;
 
        mutex_lock(&tpg->tv_tpg_mutex);
        tv_nexus = tpg->tpg_nexus;
@@ -1994,10 +2008,10 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
        }
 
        pr_debug("TCM_vhost_ConfigFS: Removing I_T Nexus to emulated"
-               " %s Initiator Port: %s\n", tcm_vhost_dump_proto_id(tpg->tport),
+               " %s Initiator Port: %s\n", vhost_scsi_dump_proto_id(tpg->tport),
                tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
 
-       tcm_vhost_free_cmd_map_res(tv_nexus, se_sess);
+       vhost_scsi_free_cmd_map_res(tv_nexus, se_sess);
        /*
         * Release the SCSI I_T Nexus to the emulated vhost Target Port
         */
@@ -2009,12 +2023,12 @@ static int tcm_vhost_drop_nexus(struct tcm_vhost_tpg *tpg)
        return 0;
 }
 
-static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
+static ssize_t vhost_scsi_tpg_show_nexus(struct se_portal_group *se_tpg,
                                        char *page)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_nexus *tv_nexus;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_nexus *tv_nexus;
        ssize_t ret;
 
        mutex_lock(&tpg->tv_tpg_mutex);
@@ -2030,40 +2044,40 @@ static ssize_t tcm_vhost_tpg_show_nexus(struct se_portal_group *se_tpg,
        return ret;
 }
 
-static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
+static ssize_t vhost_scsi_tpg_store_nexus(struct se_portal_group *se_tpg,
                                         const char *page,
                                         size_t count)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
-       struct tcm_vhost_tport *tport_wwn = tpg->tport;
-       unsigned char i_port[TCM_VHOST_NAMELEN], *ptr, *port_ptr;
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
+       struct vhost_scsi_tport *tport_wwn = tpg->tport;
+       unsigned char i_port[VHOST_SCSI_NAMELEN], *ptr, *port_ptr;
        int ret;
        /*
         * Shutdown the active I_T nexus if 'NULL' is passed..
         */
        if (!strncmp(page, "NULL", 4)) {
-               ret = tcm_vhost_drop_nexus(tpg);
+               ret = vhost_scsi_drop_nexus(tpg);
                return (!ret) ? count : ret;
        }
        /*
         * Otherwise make sure the passed virtual Initiator port WWN matches
-        * the fabric protocol_id set in tcm_vhost_make_tport(), and call
-        * tcm_vhost_make_nexus().
+        * the fabric protocol_id set in vhost_scsi_make_tport(), and call
+        * vhost_scsi_make_nexus().
         */
-       if (strlen(page) >= TCM_VHOST_NAMELEN) {
+       if (strlen(page) >= VHOST_SCSI_NAMELEN) {
                pr_err("Emulated NAA Sas Address: %s, exceeds"
-                               " max: %d\n", page, TCM_VHOST_NAMELEN);
+                               " max: %d\n", page, VHOST_SCSI_NAMELEN);
                return -EINVAL;
        }
-       snprintf(&i_port[0], TCM_VHOST_NAMELEN, "%s", page);
+       snprintf(&i_port[0], VHOST_SCSI_NAMELEN, "%s", page);
 
        ptr = strstr(i_port, "naa.");
        if (ptr) {
                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
                        pr_err("Passed SAS Initiator Port %s does not"
                                " match target port protoid: %s\n", i_port,
-                               tcm_vhost_dump_proto_id(tport_wwn));
+                               vhost_scsi_dump_proto_id(tport_wwn));
                        return -EINVAL;
                }
                port_ptr = &i_port[0];
@@ -2074,7 +2088,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
                        pr_err("Passed FCP Initiator Port %s does not"
                                " match target port protoid: %s\n", i_port,
-                               tcm_vhost_dump_proto_id(tport_wwn));
+                               vhost_scsi_dump_proto_id(tport_wwn));
                        return -EINVAL;
                }
                port_ptr = &i_port[3]; /* Skip over "fc." */
@@ -2085,7 +2099,7 @@ static ssize_t tcm_vhost_tpg_store_nexus(struct se_portal_group *se_tpg,
                if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
                        pr_err("Passed iSCSI Initiator Port %s does not"
                                " match target port protoid: %s\n", i_port,
-                               tcm_vhost_dump_proto_id(tport_wwn));
+                               vhost_scsi_dump_proto_id(tport_wwn));
                        return -EINVAL;
                }
                port_ptr = &i_port[0];
@@ -2101,40 +2115,40 @@ check_newline:
        if (i_port[strlen(i_port)-1] == '\n')
                i_port[strlen(i_port)-1] = '\0';
 
-       ret = tcm_vhost_make_nexus(tpg, port_ptr);
+       ret = vhost_scsi_make_nexus(tpg, port_ptr);
        if (ret < 0)
                return ret;
 
        return count;
 }
 
-TF_TPG_BASE_ATTR(tcm_vhost, nexus, S_IRUGO | S_IWUSR);
+TF_TPG_BASE_ATTR(vhost_scsi, nexus, S_IRUGO | S_IWUSR);
 
-static struct configfs_attribute *tcm_vhost_tpg_attrs[] = {
-       &tcm_vhost_tpg_nexus.attr,
+static struct configfs_attribute *vhost_scsi_tpg_attrs[] = {
+       &vhost_scsi_tpg_nexus.attr,
        NULL,
 };
 
 static struct se_portal_group *
-tcm_vhost_make_tpg(struct se_wwn *wwn,
+vhost_scsi_make_tpg(struct se_wwn *wwn,
                   struct config_group *group,
                   const char *name)
 {
-       struct tcm_vhost_tport *tport = container_of(wwn,
-                       struct tcm_vhost_tport, tport_wwn);
+       struct vhost_scsi_tport *tport = container_of(wwn,
+                       struct vhost_scsi_tport, tport_wwn);
 
-       struct tcm_vhost_tpg *tpg;
-       unsigned long tpgt;
+       struct vhost_scsi_tpg *tpg;
+       u16 tpgt;
        int ret;
 
        if (strstr(name, "tpgt_") != name)
                return ERR_PTR(-EINVAL);
-       if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
+       if (kstrtou16(name + 5, 10, &tpgt) || tpgt >= VHOST_SCSI_MAX_TARGET)
                return ERR_PTR(-EINVAL);
 
-       tpg = kzalloc(sizeof(struct tcm_vhost_tpg), GFP_KERNEL);
+       tpg = kzalloc(sizeof(struct vhost_scsi_tpg), GFP_KERNEL);
        if (!tpg) {
-               pr_err("Unable to allocate struct tcm_vhost_tpg");
+               pr_err("Unable to allocate struct vhost_scsi_tpg");
                return ERR_PTR(-ENOMEM);
        }
        mutex_init(&tpg->tv_tpg_mutex);
@@ -2142,31 +2156,31 @@ tcm_vhost_make_tpg(struct se_wwn *wwn,
        tpg->tport = tport;
        tpg->tport_tpgt = tpgt;
 
-       ret = core_tpg_register(&tcm_vhost_fabric_configfs->tf_ops, wwn,
+       ret = core_tpg_register(&vhost_scsi_fabric_configfs->tf_ops, wwn,
                                &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
        if (ret < 0) {
                kfree(tpg);
                return NULL;
        }
-       mutex_lock(&tcm_vhost_mutex);
-       list_add_tail(&tpg->tv_tpg_list, &tcm_vhost_list);
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
+       list_add_tail(&tpg->tv_tpg_list, &vhost_scsi_list);
+       mutex_unlock(&vhost_scsi_mutex);
 
        return &tpg->se_tpg;
 }
 
-static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
+static void vhost_scsi_drop_tpg(struct se_portal_group *se_tpg)
 {
-       struct tcm_vhost_tpg *tpg = container_of(se_tpg,
-                               struct tcm_vhost_tpg, se_tpg);
+       struct vhost_scsi_tpg *tpg = container_of(se_tpg,
+                               struct vhost_scsi_tpg, se_tpg);
 
-       mutex_lock(&tcm_vhost_mutex);
+       mutex_lock(&vhost_scsi_mutex);
        list_del(&tpg->tv_tpg_list);
-       mutex_unlock(&tcm_vhost_mutex);
+       mutex_unlock(&vhost_scsi_mutex);
        /*
         * Release the virtual I_T Nexus for this vhost TPG
         */
-       tcm_vhost_drop_nexus(tpg);
+       vhost_scsi_drop_nexus(tpg);
        /*
         * Deregister the se_tpg from TCM..
         */
@@ -2175,21 +2189,21 @@ static void tcm_vhost_drop_tpg(struct se_portal_group *se_tpg)
 }
 
 static struct se_wwn *
-tcm_vhost_make_tport(struct target_fabric_configfs *tf,
+vhost_scsi_make_tport(struct target_fabric_configfs *tf,
                     struct config_group *group,
                     const char *name)
 {
-       struct tcm_vhost_tport *tport;
+       struct vhost_scsi_tport *tport;
        char *ptr;
        u64 wwpn = 0;
        int off = 0;
 
-       /* if (tcm_vhost_parse_wwn(name, &wwpn, 1) < 0)
+       /* if (vhost_scsi_parse_wwn(name, &wwpn, 1) < 0)
                return ERR_PTR(-EINVAL); */
 
-       tport = kzalloc(sizeof(struct tcm_vhost_tport), GFP_KERNEL);
+       tport = kzalloc(sizeof(struct vhost_scsi_tport), GFP_KERNEL);
        if (!tport) {
-               pr_err("Unable to allocate struct tcm_vhost_tport");
+               pr_err("Unable to allocate struct vhost_scsi_tport");
                return ERR_PTR(-ENOMEM);
        }
        tport->tport_wwpn = wwpn;
@@ -2220,102 +2234,102 @@ tcm_vhost_make_tport(struct target_fabric_configfs *tf,
        return ERR_PTR(-EINVAL);
 
 check_len:
-       if (strlen(name) >= TCM_VHOST_NAMELEN) {
+       if (strlen(name) >= VHOST_SCSI_NAMELEN) {
                pr_err("Emulated %s Address: %s, exceeds"
-                       " max: %d\n", name, tcm_vhost_dump_proto_id(tport),
-                       TCM_VHOST_NAMELEN);
+                       " max: %d\n", name, vhost_scsi_dump_proto_id(tport),
+                       VHOST_SCSI_NAMELEN);
                kfree(tport);
                return ERR_PTR(-EINVAL);
        }
-       snprintf(&tport->tport_name[0], TCM_VHOST_NAMELEN, "%s", &name[off]);
+       snprintf(&tport->tport_name[0], VHOST_SCSI_NAMELEN, "%s", &name[off]);
 
        pr_debug("TCM_VHost_ConfigFS: Allocated emulated Target"
-               " %s Address: %s\n", tcm_vhost_dump_proto_id(tport), name);
+               " %s Address: %s\n", vhost_scsi_dump_proto_id(tport), name);
 
        return &tport->tport_wwn;
 }
 
-static void tcm_vhost_drop_tport(struct se_wwn *wwn)
+static void vhost_scsi_drop_tport(struct se_wwn *wwn)
 {
-       struct tcm_vhost_tport *tport = container_of(wwn,
-                               struct tcm_vhost_tport, tport_wwn);
+       struct vhost_scsi_tport *tport = container_of(wwn,
+                               struct vhost_scsi_tport, tport_wwn);
 
        pr_debug("TCM_VHost_ConfigFS: Deallocating emulated Target"
-               " %s Address: %s\n", tcm_vhost_dump_proto_id(tport),
+               " %s Address: %s\n", vhost_scsi_dump_proto_id(tport),
                tport->tport_name);
 
        kfree(tport);
 }
 
 static ssize_t
-tcm_vhost_wwn_show_attr_version(struct target_fabric_configfs *tf,
+vhost_scsi_wwn_show_attr_version(struct target_fabric_configfs *tf,
                                char *page)
 {
        return sprintf(page, "TCM_VHOST fabric module %s on %s/%s"
-               "on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+               "on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
                utsname()->machine);
 }
 
-TF_WWN_ATTR_RO(tcm_vhost, version);
+TF_WWN_ATTR_RO(vhost_scsi, version);
 
-static struct configfs_attribute *tcm_vhost_wwn_attrs[] = {
-       &tcm_vhost_wwn_version.attr,
+static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
+       &vhost_scsi_wwn_version.attr,
        NULL,
 };
 
-static struct target_core_fabric_ops tcm_vhost_ops = {
-       .get_fabric_name                = tcm_vhost_get_fabric_name,
-       .get_fabric_proto_ident         = tcm_vhost_get_fabric_proto_ident,
-       .tpg_get_wwn                    = tcm_vhost_get_fabric_wwn,
-       .tpg_get_tag                    = tcm_vhost_get_tag,
-       .tpg_get_default_depth          = tcm_vhost_get_default_depth,
-       .tpg_get_pr_transport_id        = tcm_vhost_get_pr_transport_id,
-       .tpg_get_pr_transport_id_len    = tcm_vhost_get_pr_transport_id_len,
-       .tpg_parse_pr_out_transport_id  = tcm_vhost_parse_pr_out_transport_id,
-       .tpg_check_demo_mode            = tcm_vhost_check_true,
-       .tpg_check_demo_mode_cache      = tcm_vhost_check_true,
-       .tpg_check_demo_mode_write_protect = tcm_vhost_check_false,
-       .tpg_check_prod_mode_write_protect = tcm_vhost_check_false,
-       .tpg_alloc_fabric_acl           = tcm_vhost_alloc_fabric_acl,
-       .tpg_release_fabric_acl         = tcm_vhost_release_fabric_acl,
-       .tpg_get_inst_index             = tcm_vhost_tpg_get_inst_index,
-       .release_cmd                    = tcm_vhost_release_cmd,
+static struct target_core_fabric_ops vhost_scsi_ops = {
+       .get_fabric_name                = vhost_scsi_get_fabric_name,
+       .get_fabric_proto_ident         = vhost_scsi_get_fabric_proto_ident,
+       .tpg_get_wwn                    = vhost_scsi_get_fabric_wwn,
+       .tpg_get_tag                    = vhost_scsi_get_tpgt,
+       .tpg_get_default_depth          = vhost_scsi_get_default_depth,
+       .tpg_get_pr_transport_id        = vhost_scsi_get_pr_transport_id,
+       .tpg_get_pr_transport_id_len    = vhost_scsi_get_pr_transport_id_len,
+       .tpg_parse_pr_out_transport_id  = vhost_scsi_parse_pr_out_transport_id,
+       .tpg_check_demo_mode            = vhost_scsi_check_true,
+       .tpg_check_demo_mode_cache      = vhost_scsi_check_true,
+       .tpg_check_demo_mode_write_protect = vhost_scsi_check_false,
+       .tpg_check_prod_mode_write_protect = vhost_scsi_check_false,
+       .tpg_alloc_fabric_acl           = vhost_scsi_alloc_fabric_acl,
+       .tpg_release_fabric_acl         = vhost_scsi_release_fabric_acl,
+       .tpg_get_inst_index             = vhost_scsi_tpg_get_inst_index,
+       .release_cmd                    = vhost_scsi_release_cmd,
        .check_stop_free                = vhost_scsi_check_stop_free,
-       .shutdown_session               = tcm_vhost_shutdown_session,
-       .close_session                  = tcm_vhost_close_session,
-       .sess_get_index                 = tcm_vhost_sess_get_index,
+       .shutdown_session               = vhost_scsi_shutdown_session,
+       .close_session                  = vhost_scsi_close_session,
+       .sess_get_index                 = vhost_scsi_sess_get_index,
        .sess_get_initiator_sid         = NULL,
-       .write_pending                  = tcm_vhost_write_pending,
-       .write_pending_status           = tcm_vhost_write_pending_status,
-       .set_default_node_attributes    = tcm_vhost_set_default_node_attrs,
-       .get_task_tag                   = tcm_vhost_get_task_tag,
-       .get_cmd_state                  = tcm_vhost_get_cmd_state,
-       .queue_data_in                  = tcm_vhost_queue_data_in,
-       .queue_status                   = tcm_vhost_queue_status,
-       .queue_tm_rsp                   = tcm_vhost_queue_tm_rsp,
-       .aborted_task                   = tcm_vhost_aborted_task,
+       .write_pending                  = vhost_scsi_write_pending,
+       .write_pending_status           = vhost_scsi_write_pending_status,
+       .set_default_node_attributes    = vhost_scsi_set_default_node_attrs,
+       .get_task_tag                   = vhost_scsi_get_task_tag,
+       .get_cmd_state                  = vhost_scsi_get_cmd_state,
+       .queue_data_in                  = vhost_scsi_queue_data_in,
+       .queue_status                   = vhost_scsi_queue_status,
+       .queue_tm_rsp                   = vhost_scsi_queue_tm_rsp,
+       .aborted_task                   = vhost_scsi_aborted_task,
        /*
         * Setup callers for generic logic in target_core_fabric_configfs.c
         */
-       .fabric_make_wwn                = tcm_vhost_make_tport,
-       .fabric_drop_wwn                = tcm_vhost_drop_tport,
-       .fabric_make_tpg                = tcm_vhost_make_tpg,
-       .fabric_drop_tpg                = tcm_vhost_drop_tpg,
-       .fabric_post_link               = tcm_vhost_port_link,
-       .fabric_pre_unlink              = tcm_vhost_port_unlink,
+       .fabric_make_wwn                = vhost_scsi_make_tport,
+       .fabric_drop_wwn                = vhost_scsi_drop_tport,
+       .fabric_make_tpg                = vhost_scsi_make_tpg,
+       .fabric_drop_tpg                = vhost_scsi_drop_tpg,
+       .fabric_post_link               = vhost_scsi_port_link,
+       .fabric_pre_unlink              = vhost_scsi_port_unlink,
        .fabric_make_np                 = NULL,
        .fabric_drop_np                 = NULL,
-       .fabric_make_nodeacl            = tcm_vhost_make_nodeacl,
-       .fabric_drop_nodeacl            = tcm_vhost_drop_nodeacl,
+       .fabric_make_nodeacl            = vhost_scsi_make_nodeacl,
+       .fabric_drop_nodeacl            = vhost_scsi_drop_nodeacl,
 };
 
-static int tcm_vhost_register_configfs(void)
+static int vhost_scsi_register_configfs(void)
 {
        struct target_fabric_configfs *fabric;
        int ret;
 
-       pr_debug("TCM_VHOST fabric module %s on %s/%s"
-               " on "UTS_RELEASE"\n", TCM_VHOST_VERSION, utsname()->sysname,
+       pr_debug("vhost-scsi fabric module %s on %s/%s"
+               " on "UTS_RELEASE"\n", VHOST_SCSI_VERSION, utsname()->sysname,
                utsname()->machine);
        /*
         * Register the top level struct config_item_type with TCM core
@@ -2326,14 +2340,14 @@ static int tcm_vhost_register_configfs(void)
                return PTR_ERR(fabric);
        }
        /*
-        * Setup fabric->tf_ops from our local tcm_vhost_ops
+        * Setup fabric->tf_ops from our local vhost_scsi_ops
         */
-       fabric->tf_ops = tcm_vhost_ops;
+       fabric->tf_ops = vhost_scsi_ops;
        /*
         * Setup default attribute lists for various fabric->tf_cit_tmpl
         */
-       fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_vhost_wwn_attrs;
-       fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = tcm_vhost_tpg_attrs;
+       fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = vhost_scsi_wwn_attrs;
+       fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = vhost_scsi_tpg_attrs;
        fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
        fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
        fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
@@ -2353,37 +2367,37 @@ static int tcm_vhost_register_configfs(void)
        /*
         * Setup our local pointer to *fabric
         */
-       tcm_vhost_fabric_configfs = fabric;
-       pr_debug("TCM_VHOST[0] - Set fabric -> tcm_vhost_fabric_configfs\n");
+       vhost_scsi_fabric_configfs = fabric;
+       pr_debug("TCM_VHOST[0] - Set fabric -> vhost_scsi_fabric_configfs\n");
        return 0;
 };
 
-static void tcm_vhost_deregister_configfs(void)
+static void vhost_scsi_deregister_configfs(void)
 {
-       if (!tcm_vhost_fabric_configfs)
+       if (!vhost_scsi_fabric_configfs)
                return;
 
-       target_fabric_configfs_deregister(tcm_vhost_fabric_configfs);
-       tcm_vhost_fabric_configfs = NULL;
-       pr_debug("TCM_VHOST[0] - Cleared tcm_vhost_fabric_configfs\n");
+       target_fabric_configfs_deregister(vhost_scsi_fabric_configfs);
+       vhost_scsi_fabric_configfs = NULL;
+       pr_debug("TCM_VHOST[0] - Cleared vhost_scsi_fabric_configfs\n");
 };
 
-static int __init tcm_vhost_init(void)
+static int __init vhost_scsi_init(void)
 {
        int ret = -ENOMEM;
        /*
         * Use our own dedicated workqueue for submitting I/O into
         * target core to avoid contention within system_wq.
         */
-       tcm_vhost_workqueue = alloc_workqueue("tcm_vhost", 0, 0);
-       if (!tcm_vhost_workqueue)
+       vhost_scsi_workqueue = alloc_workqueue("vhost_scsi", 0, 0);
+       if (!vhost_scsi_workqueue)
                goto out;
 
        ret = vhost_scsi_register();
        if (ret < 0)
                goto out_destroy_workqueue;
 
-       ret = tcm_vhost_register_configfs();
+       ret = vhost_scsi_register_configfs();
        if (ret < 0)
                goto out_vhost_scsi_deregister;
 
@@ -2392,20 +2406,20 @@ static int __init tcm_vhost_init(void)
 out_vhost_scsi_deregister:
        vhost_scsi_deregister();
 out_destroy_workqueue:
-       destroy_workqueue(tcm_vhost_workqueue);
+       destroy_workqueue(vhost_scsi_workqueue);
 out:
        return ret;
 };
 
-static void tcm_vhost_exit(void)
+static void vhost_scsi_exit(void)
 {
-       tcm_vhost_deregister_configfs();
+       vhost_scsi_deregister_configfs();
        vhost_scsi_deregister();
-       destroy_workqueue(tcm_vhost_workqueue);
+       destroy_workqueue(vhost_scsi_workqueue);
 };
 
 MODULE_DESCRIPTION("VHOST_SCSI series fabric driver");
 MODULE_ALIAS("tcm_vhost");
 MODULE_LICENSE("GPL");
-module_init(tcm_vhost_init);
-module_exit(tcm_vhost_exit);
+module_init(vhost_scsi_init);
+module_exit(vhost_scsi_exit);
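
Besides the tcm_vhost -> vhost_scsi rename, the hunk in vhost_scsi_make_tpg() tightens how the "tpgt_<n>" configfs directory name is parsed: the target id becomes a u16 read with kstrtou16() and is rejected unless it is below VHOST_SCSI_MAX_TARGET, instead of an unsigned long checked only against UINT_MAX. A minimal userspace sketch of the same bounds check follows; MAX_TARGET of 256 is an assumed stand-in for VHOST_SCSI_MAX_TARGET, not a value taken from this diff.

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define MAX_TARGET 256	/* assumed stand-in for VHOST_SCSI_MAX_TARGET */

/* Parse "tpgt_<n>" and reject anything a u16 (or the backend) cannot hold. */
static int parse_tpgt(const char *name, uint16_t *tpgt)
{
	unsigned long val;
	char *end;

	if (strncmp(name, "tpgt_", 5) != 0)
		return -EINVAL;
	errno = 0;
	val = strtoul(name + 5, &end, 10);
	if (errno || end == name + 5 || *end != '\0' || val >= MAX_TARGET)
		return -EINVAL;
	*tpgt = (uint16_t)val;
	return 0;
}
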
index 9ee5343d48849d85c1404538ce3f5433ad460d6a..3662f1d1d9cf0fc2f73c44fa4c34fb6e14c0af5f 100644 (file)
@@ -1127,7 +1127,7 @@ static int v9fs_vfs_setattr(struct dentry *dentry, struct iattr *iattr)
        }
 
        /* Write all dirty data */
-       if (S_ISREG(dentry->d_inode->i_mode))
+       if (d_is_reg(dentry))
                filemap_write_and_wait(dentry->d_inode->i_mapping);
 
        retval = p9_client_wstat(fid, &wstat);
index 118a2e0088d8fdd8391654a44edb06157dad5629..f8e52a1854c1ab383e32383ac65a0f167e385793 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1285,7 +1285,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
 
        ret = -EINVAL;
        if (unlikely(ctx || nr_events == 0)) {
-               pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
+               pr_debug("EINVAL: ctx %lu nr_events %u\n",
                         ctx, nr_events);
                goto out;
        }
@@ -1333,7 +1333,7 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 
                return ret;
        }
-       pr_debug("EINVAL: io_destroy: invalid context id\n");
+       pr_debug("EINVAL: invalid context id\n");
        return -EINVAL;
 }
 
@@ -1515,7 +1515,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
            (iocb->aio_nbytes != (size_t)iocb->aio_nbytes) ||
            ((ssize_t)iocb->aio_nbytes < 0)
           )) {
-               pr_debug("EINVAL: io_submit: overflow check\n");
+               pr_debug("EINVAL: overflow check\n");
                return -EINVAL;
        }
 
index aaf96cb25452cf04a5d7a47f2b506486e67cd502..ac7d921ed9844b0a0c6afd0e6d4eaf4ca718f955 100644 (file)
@@ -95,7 +95,7 @@ static int check_dev_ioctl_version(int cmd, struct autofs_dev_ioctl *param)
  */
 static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *in)
 {
-       struct autofs_dev_ioctl tmp;
+       struct autofs_dev_ioctl tmp, *res;
 
        if (copy_from_user(&tmp, in, sizeof(tmp)))
                return ERR_PTR(-EFAULT);
@@ -106,7 +106,11 @@ static struct autofs_dev_ioctl *copy_dev_ioctl(struct autofs_dev_ioctl __user *i
        if (tmp.size > (PATH_MAX + sizeof(tmp)))
                return ERR_PTR(-ENAMETOOLONG);
 
-       return memdup_user(in, tmp.size);
+       res = memdup_user(in, tmp.size);
+       if (!IS_ERR(res))
+               res->size = tmp.size;
+
+       return res;
 }
 
 static inline void free_dev_ioctl(struct autofs_dev_ioctl *param)
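
The copy_dev_ioctl() change above duplicates the whole user buffer with memdup_user() and then rewrites res->size with the length that was actually validated, so later code sees the same size that was checked and allocated even if userspace rewrites the field between the two copies. A rough userspace sketch of that validate-then-pin pattern; the struct and helper names here are illustrative, not the autofs API.

#include <stdlib.h>
#include <string.h>

struct ioctl_hdr {
	size_t size;		/* total length: header plus payload */
	/* variable payload follows */
};

static struct ioctl_hdr *dup_ioctl(const void *user_buf)
{
	struct ioctl_hdr tmp, *res;

	memcpy(&tmp, user_buf, sizeof(tmp));	/* first read: header only */
	if (tmp.size < sizeof(tmp))
		return NULL;			/* length fails validation */

	res = malloc(tmp.size);
	if (!res)
		return NULL;
	memcpy(res, user_buf, tmp.size);	/* second read: full buffer */

	/* The source may have changed in between; keep the validated length. */
	res->size = tmp.size;
	return res;
}
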
index bfdbaba9c2ba40e7216d7435d69d45016c9ec5ff..11dd118f75e25e6a8f25f2143724901fb64c72a0 100644 (file)
@@ -374,7 +374,7 @@ static struct dentry *should_expire(struct dentry *dentry,
                return NULL;
        }
 
-       if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) {
+       if (dentry->d_inode && d_is_symlink(dentry)) {
                DPRINTK("checking symlink %p %pd", dentry, dentry);
                /*
                 * A symlink can't be "busy" in the usual sense so
index dbb5b7212ce162130727256d49351e3f2d2677c9..7e44fdd03e2dd0a684036be49e7c2152c192ee63 100644 (file)
@@ -108,7 +108,7 @@ static int autofs4_dir_open(struct inode *inode, struct file *file)
        struct dentry *dentry = file->f_path.dentry;
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
 
-       DPRINTK("file=%p dentry=%p %pD", file, dentry, dentry);
+       DPRINTK("file=%p dentry=%p %pd", file, dentry, dentry);
 
        if (autofs4_oz_mode(sbi))
                goto out;
@@ -371,7 +371,7 @@ static struct vfsmount *autofs4_d_automount(struct path *path)
         * having d_mountpoint() true, so there's no need to call back
         * to the daemon.
         */
-       if (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)) {
+       if (dentry->d_inode && d_is_symlink(dentry)) {
                spin_unlock(&sbi->fs_lock);
                goto done;
        }
@@ -485,7 +485,7 @@ static int autofs4_d_manage(struct dentry *dentry, bool rcu_walk)
                 * an incorrect ELOOP error return.
                 */
                if ((!d_mountpoint(dentry) && !simple_empty(dentry)) ||
-                   (dentry->d_inode && S_ISLNK(dentry->d_inode->i_mode)))
+                   (dentry->d_inode && d_is_symlink(dentry)))
                        status = -EISDIR;
        }
        spin_unlock(&sbi->fs_lock);
index afd2b4408adf53d78c716043c847ba48cf5c3bef..861b1e1c477710faced77767a0327c1d6e6b79c5 100644 (file)
 #include <linux/namei.h>
 #include <linux/poll.h>
 
-
-static loff_t bad_file_llseek(struct file *file, loff_t offset, int whence)
-{
-       return -EIO;
-}
-
-static ssize_t bad_file_read(struct file *filp, char __user *buf,
-                       size_t size, loff_t *ppos)
-{
-        return -EIO;
-}
-
-static ssize_t bad_file_write(struct file *filp, const char __user *buf,
-                       size_t siz, loff_t *ppos)
-{
-        return -EIO;
-}
-
-static ssize_t bad_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
-                       unsigned long nr_segs, loff_t pos)
-{
-       return -EIO;
-}
-
-static ssize_t bad_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
-                       unsigned long nr_segs, loff_t pos)
-{
-       return -EIO;
-}
-
-static int bad_file_readdir(struct file *file, struct dir_context *ctx)
-{
-       return -EIO;
-}
-
-static unsigned int bad_file_poll(struct file *filp, poll_table *wait)
-{
-       return POLLERR;
-}
-
-static long bad_file_unlocked_ioctl(struct file *file, unsigned cmd,
-                       unsigned long arg)
-{
-       return -EIO;
-}
-
-static long bad_file_compat_ioctl(struct file *file, unsigned int cmd,
-                       unsigned long arg)
-{
-       return -EIO;
-}
-
-static int bad_file_mmap(struct file *file, struct vm_area_struct *vma)
-{
-       return -EIO;
-}
-
 static int bad_file_open(struct inode *inode, struct file *filp)
 {
        return -EIO;
 }
 
-static int bad_file_flush(struct file *file, fl_owner_t id)
-{
-       return -EIO;
-}
-
-static int bad_file_release(struct inode *inode, struct file *filp)
-{
-       return -EIO;
-}
-
-static int bad_file_fsync(struct file *file, loff_t start, loff_t end,
-                         int datasync)
-{
-       return -EIO;
-}
-
-static int bad_file_aio_fsync(struct kiocb *iocb, int datasync)
-{
-       return -EIO;
-}
-
-static int bad_file_fasync(int fd, struct file *filp, int on)
-{
-       return -EIO;
-}
-
-static int bad_file_lock(struct file *file, int cmd, struct file_lock *fl)
-{
-       return -EIO;
-}
-
-static ssize_t bad_file_sendpage(struct file *file, struct page *page,
-                       int off, size_t len, loff_t *pos, int more)
-{
-       return -EIO;
-}
-
-static unsigned long bad_file_get_unmapped_area(struct file *file,
-                               unsigned long addr, unsigned long len,
-                               unsigned long pgoff, unsigned long flags)
-{
-       return -EIO;
-}
-
-static int bad_file_check_flags(int flags)
-{
-       return -EIO;
-}
-
-static int bad_file_flock(struct file *filp, int cmd, struct file_lock *fl)
-{
-       return -EIO;
-}
-
-static ssize_t bad_file_splice_write(struct pipe_inode_info *pipe,
-                       struct file *out, loff_t *ppos, size_t len,
-                       unsigned int flags)
-{
-       return -EIO;
-}
-
-static ssize_t bad_file_splice_read(struct file *in, loff_t *ppos,
-                       struct pipe_inode_info *pipe, size_t len,
-                       unsigned int flags)
-{
-       return -EIO;
-}
-
 static const struct file_operations bad_file_ops =
 {
-       .llseek         = bad_file_llseek,
-       .read           = bad_file_read,
-       .write          = bad_file_write,
-       .aio_read       = bad_file_aio_read,
-       .aio_write      = bad_file_aio_write,
-       .iterate        = bad_file_readdir,
-       .poll           = bad_file_poll,
-       .unlocked_ioctl = bad_file_unlocked_ioctl,
-       .compat_ioctl   = bad_file_compat_ioctl,
-       .mmap           = bad_file_mmap,
        .open           = bad_file_open,
-       .flush          = bad_file_flush,
-       .release        = bad_file_release,
-       .fsync          = bad_file_fsync,
-       .aio_fsync      = bad_file_aio_fsync,
-       .fasync         = bad_file_fasync,
-       .lock           = bad_file_lock,
-       .sendpage       = bad_file_sendpage,
-       .get_unmapped_area = bad_file_get_unmapped_area,
-       .check_flags    = bad_file_check_flags,
-       .flock          = bad_file_flock,
-       .splice_write   = bad_file_splice_write,
-       .splice_read    = bad_file_splice_read,
 };
 
 static int bad_inode_create (struct inode *dir, struct dentry *dentry,
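
Culling the per-method -EIO stubs above relies on bad_file_open() being the only way to obtain a struct file backed by a bad inode: since open always fails, the remaining (now NULL) file operations can never be reached. A toy illustration of that gating idea, with made-up names rather than VFS code:

#include <errno.h>
#include <stddef.h>

struct toy_fops {
	int (*open)(void);
	int (*read)(void);	/* left NULL: unreachable once open() fails */
};

static int bad_open(void) { return -EIO; }

static const struct toy_fops bad_fops = { .open = bad_open };

/* Every caller goes through open() first, so the NULL methods never run. */
static int use_file(const struct toy_fops *f)
{
	int err = f->open ? f->open() : 0;

	if (err)
		return err;			/* -EIO here; read() is never reached */
	return f->read ? f->read() : -ENOSYS;
}
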
index 02b16910f4c9d500619286029ee16f0815bce269..995986b8e36b8f3fd8529582c50e545d9b26322e 100644 (file)
@@ -645,11 +645,12 @@ out:
 
 static unsigned long randomize_stack_top(unsigned long stack_top)
 {
-       unsigned int random_variable = 0;
+       unsigned long random_variable = 0;
 
        if ((current->flags & PF_RANDOMIZE) &&
                !(current->personality & ADDR_NO_RANDOMIZE)) {
-               random_variable = get_random_int() & STACK_RND_MASK;
+               random_variable = (unsigned long) get_random_int();
+               random_variable &= STACK_RND_MASK;
                random_variable <<= PAGE_SHIFT;
        }
 #ifdef CONFIG_STACK_GROWSUP
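
The randomize_stack_top() hunk widens random_variable to unsigned long before the PAGE_SHIFT shift: with a 32-bit intermediate, any mask wider than 32 - PAGE_SHIFT bits loses its top bits and the stack receives less ASLR entropy than intended. A standalone sketch of the truncation, assuming the common 64-bit x86 values PAGE_SHIFT = 12 and STACK_RND_MASK = 0x3fffff:

#include <stdio.h>

#define PAGE_SHIFT	12
#define STACK_RND_MASK	0x3fffffUL	/* assumed 64-bit x86 value */

int main(void)
{
	unsigned int rnd = 0x3fffff;		/* worst case from get_random_int() */

	unsigned int  rv32 = rnd & STACK_RND_MASK;
	unsigned long rv64 = (unsigned long)rnd & STACK_RND_MASK;

	rv32 <<= PAGE_SHIFT;			/* 22 + 12 bits no longer fit in 32 */
	rv64 <<= PAGE_SHIFT;			/* all 34 bits survive */

	printf("32-bit shift: 0x%x\n", rv32);	/* 0xfffff000: top bits lost    */
	printf("64-bit shift: 0x%lx\n", rv64);	/* 0x3fffff000: full randomness */
	return 0;
}
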
index 8729cf68d2fef5e41540283d74beba55285f59c5..f55721ff938544c1b73d0d8cb057a52dba5b7884 100644 (file)
@@ -1246,25 +1246,6 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans,
        return ret;
 }
 
-/*
- * this makes the path point to (inum INODE_ITEM ioff)
- */
-int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
-                       struct btrfs_path *path)
-{
-       struct btrfs_key key;
-       return btrfs_find_item(fs_root, path, inum, ioff,
-                       BTRFS_INODE_ITEM_KEY, &key);
-}
-
-static int inode_ref_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
-                               struct btrfs_path *path,
-                               struct btrfs_key *found_key)
-{
-       return btrfs_find_item(fs_root, path, inum, ioff,
-                       BTRFS_INODE_REF_KEY, found_key);
-}
-
 int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
                          u64 start_off, struct btrfs_path *path,
                          struct btrfs_inode_extref **ret_extref,
@@ -1374,7 +1355,8 @@ char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
                        btrfs_tree_read_unlock_blocking(eb);
                        free_extent_buffer(eb);
                }
-               ret = inode_ref_info(parent, 0, fs_root, path, &found_key);
+               ret = btrfs_find_item(fs_root, path, parent, 0,
+                               BTRFS_INODE_REF_KEY, &found_key);
                if (ret > 0)
                        ret = -ENOENT;
                if (ret)
@@ -1727,8 +1709,10 @@ static int iterate_inode_refs(u64 inum, struct btrfs_root *fs_root,
        struct btrfs_key found_key;
 
        while (!ret) {
-               ret = inode_ref_info(inum, parent ? parent+1 : 0, fs_root, path,
-                                    &found_key);
+               ret = btrfs_find_item(fs_root, path, inum,
+                               parent ? parent + 1 : 0, BTRFS_INODE_REF_KEY,
+                               &found_key);
+
                if (ret < 0)
                        break;
                if (ret) {
index 2a1ac6bfc724637f3a80ac15c6ce6237dc7998f0..9c41fbac30091f39bad52a3f21ebf6e931db364b 100644 (file)
@@ -32,9 +32,6 @@ struct inode_fs_paths {
 typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
                void *ctx);
 
-int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
-                       struct btrfs_path *path);
-
 int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
                        struct btrfs_path *path, struct btrfs_key *found_key,
                        u64 *flags);
index 4aadadcfab20178d734ad395c1448676603a01b8..de5e4f2adfeac9d07ba2539a8098b44cc9781580 100644 (file)
@@ -185,6 +185,9 @@ struct btrfs_inode {
 
        struct btrfs_delayed_node *delayed_node;
 
+       /* File creation time. */
+       struct timespec i_otime;
+
        struct inode vfs_inode;
 };
 
index 14a72ed14ef7b1c2a5b78688c0b316d129b2bb72..993642199326a757f55c78dbc2722ecc2b409b60 100644 (file)
@@ -213,11 +213,19 @@ static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
  */
 static void add_root_to_dirty_list(struct btrfs_root *root)
 {
+       if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
+           !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
+               return;
+
        spin_lock(&root->fs_info->trans_lock);
-       if (test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state) &&
-           list_empty(&root->dirty_list)) {
-               list_add(&root->dirty_list,
-                        &root->fs_info->dirty_cowonly_roots);
+       if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
+               /* Want the extent tree to be the last on the list */
+               if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
+                       list_move_tail(&root->dirty_list,
+                                      &root->fs_info->dirty_cowonly_roots);
+               else
+                       list_move(&root->dirty_list,
+                                 &root->fs_info->dirty_cowonly_roots);
        }
        spin_unlock(&root->fs_info->trans_lock);
 }
@@ -1363,8 +1371,7 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 
        if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
                BUG_ON(tm->slot != 0);
-               eb_rewin = alloc_dummy_extent_buffer(eb->start,
-                                               fs_info->tree_root->nodesize);
+               eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
                if (!eb_rewin) {
                        btrfs_tree_read_unlock_blocking(eb);
                        free_extent_buffer(eb);
@@ -1444,7 +1451,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
        } else if (old_root) {
                btrfs_tree_read_unlock(eb_root);
                free_extent_buffer(eb_root);
-               eb = alloc_dummy_extent_buffer(logical, root->nodesize);
+               eb = alloc_dummy_extent_buffer(root->fs_info, logical);
        } else {
                btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
                eb = btrfs_clone_extent_buffer(eb_root);
@@ -2282,7 +2289,7 @@ static void reada_for_search(struct btrfs_root *root,
                if ((search <= target && target - search <= 65536) ||
                    (search > target && search - target <= 65536)) {
                        gen = btrfs_node_ptr_generation(node, nr);
-                       readahead_tree_block(root, search, blocksize);
+                       readahead_tree_block(root, search);
                        nread += blocksize;
                }
                nscan++;
@@ -2301,7 +2308,6 @@ static noinline void reada_for_balance(struct btrfs_root *root,
        u64 gen;
        u64 block1 = 0;
        u64 block2 = 0;
-       int blocksize;
 
        parent = path->nodes[level + 1];
        if (!parent)
@@ -2309,7 +2315,6 @@ static noinline void reada_for_balance(struct btrfs_root *root,
 
        nritems = btrfs_header_nritems(parent);
        slot = path->slots[level + 1];
-       blocksize = root->nodesize;
 
        if (slot > 0) {
                block1 = btrfs_node_blockptr(parent, slot - 1);
@@ -2334,9 +2339,9 @@ static noinline void reada_for_balance(struct btrfs_root *root,
        }
 
        if (block1)
-               readahead_tree_block(root, block1, blocksize);
+               readahead_tree_block(root, block1);
        if (block2)
-               readahead_tree_block(root, block2, blocksize);
+               readahead_tree_block(root, block2);
 }
 
 
@@ -2609,32 +2614,24 @@ static int key_search(struct extent_buffer *b, struct btrfs_key *key,
        return 0;
 }
 
-int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *found_path,
+int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
                u64 iobjectid, u64 ioff, u8 key_type,
                struct btrfs_key *found_key)
 {
        int ret;
        struct btrfs_key key;
        struct extent_buffer *eb;
-       struct btrfs_path *path;
+
+       ASSERT(path);
+       ASSERT(found_key);
 
        key.type = key_type;
        key.objectid = iobjectid;
        key.offset = ioff;
 
-       if (found_path == NULL) {
-               path = btrfs_alloc_path();
-               if (!path)
-                       return -ENOMEM;
-       } else
-               path = found_path;
-
        ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
-       if ((ret < 0) || (found_key == NULL)) {
-               if (path != found_path)
-                       btrfs_free_path(path);
+       if (ret < 0)
                return ret;
-       }
 
        eb = path->nodes[0];
        if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
@@ -3383,7 +3380,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
        add_root_to_dirty_list(root);
        extent_buffer_get(c);
        path->nodes[level] = c;
-       path->locks[level] = BTRFS_WRITE_LOCK;
+       path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
        path->slots[level] = 0;
        return 0;
 }
@@ -4356,13 +4353,15 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
        path->search_for_split = 1;
        ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
        path->search_for_split = 0;
+       if (ret > 0)
+               ret = -EAGAIN;
        if (ret < 0)
                goto err;
 
        ret = -EAGAIN;
        leaf = path->nodes[0];
-       /* if our item isn't there or got smaller, return now */
-       if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
+       /* if our item isn't there, return now */
+       if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
                goto err;
 
        /* the leaf has  changed, it now has room.  return now */
index 0b180708bf79d87a36c9dcc78bbd6d72772101df..84c3b00f3de8eedf47ba3bec71939a5fba1f90a0 100644 (file)
@@ -198,6 +198,8 @@ static int btrfs_csum_sizes[] = { 4, 0 };
 
 #define BTRFS_DIRTY_METADATA_THRESH    (32 * 1024 * 1024)
 
+#define BTRFS_MAX_EXTENT_SIZE (128 * 1024 * 1024)
+
 /*
  * The key defines the order in the tree, and so it also defines (optimal)
  * block layout.
@@ -1020,6 +1022,9 @@ enum btrfs_raid_types {
                                         BTRFS_BLOCK_GROUP_RAID6 |   \
                                         BTRFS_BLOCK_GROUP_DUP |     \
                                         BTRFS_BLOCK_GROUP_RAID10)
+#define BTRFS_BLOCK_GROUP_RAID56_MASK  (BTRFS_BLOCK_GROUP_RAID5 |   \
+                                        BTRFS_BLOCK_GROUP_RAID6)
+
 /*
  * We need a bit for restriper to be able to tell when chunks of type
  * SINGLE are available.  This "extended" profile format is used in
@@ -1239,7 +1244,6 @@ enum btrfs_disk_cache_state {
        BTRFS_DC_ERROR          = 1,
        BTRFS_DC_CLEAR          = 2,
        BTRFS_DC_SETUP          = 3,
-       BTRFS_DC_NEED_WRITE     = 4,
 };
 
 struct btrfs_caching_control {
@@ -1277,7 +1281,6 @@ struct btrfs_block_group_cache {
        unsigned long full_stripe_len;
 
        unsigned int ro:1;
-       unsigned int dirty:1;
        unsigned int iref:1;
        unsigned int has_caching_ctl:1;
        unsigned int removed:1;
@@ -1315,6 +1318,9 @@ struct btrfs_block_group_cache {
        struct list_head ro_list;
 
        atomic_t trimming;
+
+       /* For dirty block groups */
+       struct list_head dirty_list;
 };
 
 /* delayed seq elem */
@@ -1741,6 +1747,7 @@ struct btrfs_fs_info {
 
        spinlock_t unused_bgs_lock;
        struct list_head unused_bgs;
+       struct mutex unused_bg_unpin_mutex;
 
        /* For btrfs to record security options */
        struct security_mnt_opts security_opts;
@@ -1776,6 +1783,7 @@ struct btrfs_subvolume_writers {
 #define BTRFS_ROOT_DEFRAG_RUNNING      6
 #define BTRFS_ROOT_FORCE_COW           7
 #define BTRFS_ROOT_MULTI_LOG_TASKS     8
+#define BTRFS_ROOT_DIRTY               9
 
 /*
  * in ram representation of the tree.  extent_root is used for all allocations
@@ -1794,8 +1802,6 @@ struct btrfs_root {
        struct btrfs_fs_info *fs_info;
        struct extent_io_tree dirty_log_pages;
 
-       struct kobject root_kobj;
-       struct completion kobj_unregister;
        struct mutex objectid_mutex;
 
        spinlock_t accounting_lock;
@@ -2465,31 +2471,6 @@ BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32);
 BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32);
 BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64);
 BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64);
-
-static inline struct btrfs_timespec *
-btrfs_inode_atime(struct btrfs_inode_item *inode_item)
-{
-       unsigned long ptr = (unsigned long)inode_item;
-       ptr += offsetof(struct btrfs_inode_item, atime);
-       return (struct btrfs_timespec *)ptr;
-}
-
-static inline struct btrfs_timespec *
-btrfs_inode_mtime(struct btrfs_inode_item *inode_item)
-{
-       unsigned long ptr = (unsigned long)inode_item;
-       ptr += offsetof(struct btrfs_inode_item, mtime);
-       return (struct btrfs_timespec *)ptr;
-}
-
-static inline struct btrfs_timespec *
-btrfs_inode_ctime(struct btrfs_inode_item *inode_item)
-{
-       unsigned long ptr = (unsigned long)inode_item;
-       ptr += offsetof(struct btrfs_inode_item, ctime);
-       return (struct btrfs_timespec *)ptr;
-}
-
 BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64);
 BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32);
 BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64);
index de4e70fb3cbbd4a5c28d13f1fe3aec16733ed49f..82f0c7c954747363859fff768917f34ec67765ab 100644 (file)
@@ -1755,27 +1755,31 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
        btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
        btrfs_set_stack_inode_block_group(inode_item, 0);
 
-       btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item),
+       btrfs_set_stack_timespec_sec(&inode_item->atime,
                                     inode->i_atime.tv_sec);
-       btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item),
+       btrfs_set_stack_timespec_nsec(&inode_item->atime,
                                      inode->i_atime.tv_nsec);
 
-       btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item),
+       btrfs_set_stack_timespec_sec(&inode_item->mtime,
                                     inode->i_mtime.tv_sec);
-       btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item),
+       btrfs_set_stack_timespec_nsec(&inode_item->mtime,
                                      inode->i_mtime.tv_nsec);
 
-       btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item),
+       btrfs_set_stack_timespec_sec(&inode_item->ctime,
                                     inode->i_ctime.tv_sec);
-       btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item),
+       btrfs_set_stack_timespec_nsec(&inode_item->ctime,
                                      inode->i_ctime.tv_nsec);
+
+       btrfs_set_stack_timespec_sec(&inode_item->otime,
+                                    BTRFS_I(inode)->i_otime.tv_sec);
+       btrfs_set_stack_timespec_nsec(&inode_item->otime,
+                                    BTRFS_I(inode)->i_otime.tv_nsec);
 }
 
 int btrfs_fill_inode(struct inode *inode, u32 *rdev)
 {
        struct btrfs_delayed_node *delayed_node;
        struct btrfs_inode_item *inode_item;
-       struct btrfs_timespec *tspec;
 
        delayed_node = btrfs_get_delayed_node(inode);
        if (!delayed_node)
@@ -1802,17 +1806,19 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
        *rdev = btrfs_stack_inode_rdev(inode_item);
        BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
 
-       tspec = btrfs_inode_atime(inode_item);
-       inode->i_atime.tv_sec = btrfs_stack_timespec_sec(tspec);
-       inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+       inode->i_atime.tv_sec = btrfs_stack_timespec_sec(&inode_item->atime);
+       inode->i_atime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->atime);
+
+       inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(&inode_item->mtime);
+       inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->mtime);
 
-       tspec = btrfs_inode_mtime(inode_item);
-       inode->i_mtime.tv_sec = btrfs_stack_timespec_sec(tspec);
-       inode->i_mtime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+       inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(&inode_item->ctime);
+       inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(&inode_item->ctime);
 
-       tspec = btrfs_inode_ctime(inode_item);
-       inode->i_ctime.tv_sec = btrfs_stack_timespec_sec(tspec);
-       inode->i_ctime.tv_nsec = btrfs_stack_timespec_nsec(tspec);
+       BTRFS_I(inode)->i_otime.tv_sec =
+               btrfs_stack_timespec_sec(&inode_item->otime);
+       BTRFS_I(inode)->i_otime.tv_nsec =
+               btrfs_stack_timespec_nsec(&inode_item->otime);
 
        inode->i_generation = BTRFS_I(inode)->generation;
        BTRFS_I(inode)->index_cnt = (u64)-1;
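
The hunks above drop the btrfs_inode_atime/mtime/ctime() helpers, pass &inode_item->atime etc. straight to the accessors generated by BTRFS_SETGET_STACK_FUNCS(), and wire up the new otime (creation time) field on the same paths. As a rough userspace analogue of that setget pattern (the real kernel macro also does little-endian conversion, which this sketch omits; the struct and function names here are illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    struct my_timespec   { uint64_t sec; uint32_t nsec; };
    struct my_inode_item { struct my_timespec atime, mtime, ctime, otime; };

    /* Generate a getter/setter pair for one struct member, in the spirit of
     * BTRFS_SETGET_STACK_FUNCS(); endianness handling is left out. */
    #define SETGET_STACK_FUNCS(name, type, member, bits)            \
    static inline uint##bits##_t get_##name(const type *s)          \
    { return s->member; }                                           \
    static inline void set_##name(type *s, uint##bits##_t val)      \
    { s->member = val; }

    SETGET_STACK_FUNCS(timespec_sec,  struct my_timespec, sec,  64)
    SETGET_STACK_FUNCS(timespec_nsec, struct my_timespec, nsec, 32)

    int main(void)
    {
        struct my_inode_item item = {0};

        /* Direct member access replaces the removed wrapper helpers. */
        set_timespec_sec(&item.otime, 1424800000);
        set_timespec_nsec(&item.otime, 0);
        printf("otime.sec=%llu\n",
               (unsigned long long)get_timespec_sec(&item.otime));
        return 0;
    }
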
index ca6a3a3b6b6c4cdbfac15f79f003823149493181..5ec03d999c37bfb3e48d937abd79b6dc57cd5257 100644 (file)
@@ -440,18 +440,9 @@ leave:
  */
 static void btrfs_rm_dev_replace_blocked(struct btrfs_fs_info *fs_info)
 {
-       s64 writers;
-       DEFINE_WAIT(wait);
-
        set_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state);
-       do {
-               prepare_to_wait(&fs_info->replace_wait, &wait,
-                               TASK_UNINTERRUPTIBLE);
-               writers = percpu_counter_sum(&fs_info->bio_counter);
-               if (writers)
-                       schedule();
-               finish_wait(&fs_info->replace_wait, &wait);
-       } while (writers);
+       wait_event(fs_info->replace_wait, !percpu_counter_sum(
+                  &fs_info->bio_counter));
 }
 
 /*
@@ -932,15 +923,15 @@ void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
 
 void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info)
 {
-       DEFINE_WAIT(wait);
-again:
-       percpu_counter_inc(&fs_info->bio_counter);
-       if (test_bit(BTRFS_FS_STATE_DEV_REPLACING, &fs_info->fs_state)) {
+       while (1) {
+               percpu_counter_inc(&fs_info->bio_counter);
+               if (likely(!test_bit(BTRFS_FS_STATE_DEV_REPLACING,
+                                    &fs_info->fs_state)))
+                       break;
+
                btrfs_bio_counter_dec(fs_info);
                wait_event(fs_info->replace_wait,
                           !test_bit(BTRFS_FS_STATE_DEV_REPLACING,
                                     &fs_info->fs_state));
-               goto again;
        }
-
 }
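
The first hunk above replaces an open-coded prepare_to_wait()/schedule() loop with wait_event(), and the second reshapes a goto-based retry into a plain while (1) loop around the same helper. For reference, wait_event(wq, condition) expands to roughly the pattern the first hunk deletes; the sketch below is a simplified illustration of that idea, not the exact macro from include/linux/wait.h:

    /* Roughly what wait_event(wq, condition) boils down to: */
    DEFINE_WAIT(__wait);

    for (;;) {
            prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);
            if (condition)
                    break;
            schedule();
    }
    finish_wait(&wq, &__wait);
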
index 1afb18226da82c9bff5870c2f7770b164d7f68b2..f79f38542a737631e191e08aed6b50d4a940f964 100644 (file)
@@ -318,7 +318,7 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf,
                        memcpy(&found, result, csum_size);
 
                        read_extent_buffer(buf, &val, 0, csum_size);
-                       printk_ratelimited(KERN_INFO
+                       printk_ratelimited(KERN_WARNING
                                "BTRFS: %s checksum verify failed on %llu wanted %X found %X "
                                "level %d\n",
                                root->fs_info->sb->s_id, buf->start,
@@ -367,7 +367,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
                ret = 0;
                goto out;
        }
-       printk_ratelimited(KERN_INFO "BTRFS (device %s): parent transid verify failed on %llu wanted %llu found %llu\n",
+       printk_ratelimited(KERN_ERR
+           "BTRFS (device %s): parent transid verify failed on %llu wanted %llu found %llu\n",
                        eb->fs_info->sb->s_id, eb->start,
                        parent_transid, btrfs_header_generation(eb));
        ret = 1;
@@ -633,21 +634,21 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
 
        found_start = btrfs_header_bytenr(eb);
        if (found_start != eb->start) {
-               printk_ratelimited(KERN_INFO "BTRFS (device %s): bad tree block start "
+               printk_ratelimited(KERN_ERR "BTRFS (device %s): bad tree block start "
                               "%llu %llu\n",
                               eb->fs_info->sb->s_id, found_start, eb->start);
                ret = -EIO;
                goto err;
        }
        if (check_tree_block_fsid(root, eb)) {
-               printk_ratelimited(KERN_INFO "BTRFS (device %s): bad fsid on block %llu\n",
+               printk_ratelimited(KERN_ERR "BTRFS (device %s): bad fsid on block %llu\n",
                               eb->fs_info->sb->s_id, eb->start);
                ret = -EIO;
                goto err;
        }
        found_level = btrfs_header_level(eb);
        if (found_level >= BTRFS_MAX_LEVEL) {
-               btrfs_info(root->fs_info, "bad tree block level %d",
+               btrfs_err(root->fs_info, "bad tree block level %d",
                           (int)btrfs_header_level(eb));
                ret = -EIO;
                goto err;
@@ -1073,12 +1074,12 @@ static const struct address_space_operations btree_aops = {
        .set_page_dirty = btree_set_page_dirty,
 };
 
-void readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize)
+void readahead_tree_block(struct btrfs_root *root, u64 bytenr)
 {
        struct extent_buffer *buf = NULL;
        struct inode *btree_inode = root->fs_info->btree_inode;
 
-       buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+       buf = btrfs_find_create_tree_block(root, bytenr);
        if (!buf)
                return;
        read_extent_buffer_pages(&BTRFS_I(btree_inode)->io_tree,
@@ -1086,7 +1087,7 @@ void readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize)
        free_extent_buffer(buf);
 }
 
-int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
                         int mirror_num, struct extent_buffer **eb)
 {
        struct extent_buffer *buf = NULL;
@@ -1094,7 +1095,7 @@ int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
        struct extent_io_tree *io_tree = &BTRFS_I(btree_inode)->io_tree;
        int ret;
 
-       buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+       buf = btrfs_find_create_tree_block(root, bytenr);
        if (!buf)
                return 0;
 
@@ -1125,12 +1126,11 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root,
 }
 
 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
-                                                u64 bytenr, u32 blocksize)
+                                                u64 bytenr)
 {
        if (btrfs_test_is_dummy_root(root))
-               return alloc_test_extent_buffer(root->fs_info, bytenr,
-                                               blocksize);
-       return alloc_extent_buffer(root->fs_info, bytenr, blocksize);
+               return alloc_test_extent_buffer(root->fs_info, bytenr);
+       return alloc_extent_buffer(root->fs_info, bytenr);
 }
 
 
@@ -1152,7 +1152,7 @@ struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
        struct extent_buffer *buf = NULL;
        int ret;
 
-       buf = btrfs_find_create_tree_block(root, bytenr, root->nodesize);
+       buf = btrfs_find_create_tree_block(root, bytenr);
        if (!buf)
                return NULL;
 
@@ -1275,12 +1275,10 @@ static void __setup_root(u32 nodesize, u32 sectorsize, u32 stripesize,
        memset(&root->root_key, 0, sizeof(root->root_key));
        memset(&root->root_item, 0, sizeof(root->root_item));
        memset(&root->defrag_progress, 0, sizeof(root->defrag_progress));
-       memset(&root->root_kobj, 0, sizeof(root->root_kobj));
        if (fs_info)
                root->defrag_trans_start = fs_info->generation;
        else
                root->defrag_trans_start = 0;
-       init_completion(&root->kobj_unregister);
        root->root_key.objectid = objectid;
        root->anon_dev = 0;
 
@@ -1630,6 +1628,8 @@ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info,
                                     bool check_ref)
 {
        struct btrfs_root *root;
+       struct btrfs_path *path;
+       struct btrfs_key key;
        int ret;
 
        if (location->objectid == BTRFS_ROOT_TREE_OBJECTID)
@@ -1669,8 +1669,17 @@ again:
        if (ret)
                goto fail;
 
-       ret = btrfs_find_item(fs_info->tree_root, NULL, BTRFS_ORPHAN_OBJECTID,
-                       location->objectid, BTRFS_ORPHAN_ITEM_KEY, NULL);
+       path = btrfs_alloc_path();
+       if (!path) {
+               ret = -ENOMEM;
+               goto fail;
+       }
+       key.objectid = BTRFS_ORPHAN_OBJECTID;
+       key.type = BTRFS_ORPHAN_ITEM_KEY;
+       key.offset = location->objectid;
+
+       ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
+       btrfs_free_path(path);
        if (ret < 0)
                goto fail;
        if (ret == 0)
@@ -2232,6 +2241,7 @@ int open_ctree(struct super_block *sb,
        spin_lock_init(&fs_info->qgroup_op_lock);
        spin_lock_init(&fs_info->buffer_lock);
        spin_lock_init(&fs_info->unused_bgs_lock);
+       mutex_init(&fs_info->unused_bg_unpin_mutex);
        rwlock_init(&fs_info->tree_mod_log_lock);
        mutex_init(&fs_info->reloc_mutex);
        mutex_init(&fs_info->delalloc_root_mutex);
@@ -2496,7 +2506,7 @@ int open_ctree(struct super_block *sb,
                features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
 
        if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
-               printk(KERN_ERR "BTRFS: has skinny extents\n");
+               printk(KERN_INFO "BTRFS: has skinny extents\n");
 
        /*
         * flag our filesystem as having big metadata blocks if
@@ -2520,7 +2530,7 @@ int open_ctree(struct super_block *sb,
         */
        if ((features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
            (sectorsize != nodesize)) {
-               printk(KERN_WARNING "BTRFS: unequal leaf/node/sector sizes "
+               printk(KERN_ERR "BTRFS: unequal leaf/node/sector sizes "
                                "are not allowed for mixed block groups on %s\n",
                                sb->s_id);
                goto fail_alloc;
@@ -2628,12 +2638,12 @@ int open_ctree(struct super_block *sb,
        sb->s_blocksize_bits = blksize_bits(sectorsize);
 
        if (btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
-               printk(KERN_INFO "BTRFS: valid FS not found on %s\n", sb->s_id);
+               printk(KERN_ERR "BTRFS: valid FS not found on %s\n", sb->s_id);
                goto fail_sb_buffer;
        }
 
        if (sectorsize != PAGE_SIZE) {
-               printk(KERN_WARNING "BTRFS: Incompatible sector size(%lu) "
+               printk(KERN_ERR "BTRFS: incompatible sector size (%lu) "
                       "found on %s\n", (unsigned long)sectorsize, sb->s_id);
                goto fail_sb_buffer;
        }
@@ -2642,7 +2652,7 @@ int open_ctree(struct super_block *sb,
        ret = btrfs_read_sys_array(tree_root);
        mutex_unlock(&fs_info->chunk_mutex);
        if (ret) {
-               printk(KERN_WARNING "BTRFS: failed to read the system "
+               printk(KERN_ERR "BTRFS: failed to read the system "
                       "array on %s\n", sb->s_id);
                goto fail_sb_buffer;
        }
@@ -2657,7 +2667,7 @@ int open_ctree(struct super_block *sb,
                                           generation);
        if (!chunk_root->node ||
            !test_bit(EXTENT_BUFFER_UPTODATE, &chunk_root->node->bflags)) {
-               printk(KERN_WARNING "BTRFS: failed to read chunk root on %s\n",
+               printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
                       sb->s_id);
                goto fail_tree_roots;
        }
@@ -2669,7 +2679,7 @@ int open_ctree(struct super_block *sb,
 
        ret = btrfs_read_chunk_tree(chunk_root);
        if (ret) {
-               printk(KERN_WARNING "BTRFS: failed to read chunk tree on %s\n",
+               printk(KERN_ERR "BTRFS: failed to read chunk tree on %s\n",
                       sb->s_id);
                goto fail_tree_roots;
        }
@@ -2681,7 +2691,7 @@ int open_ctree(struct super_block *sb,
        btrfs_close_extra_devices(fs_info, fs_devices, 0);
 
        if (!fs_devices->latest_bdev) {
-               printk(KERN_CRIT "BTRFS: failed to read devices on %s\n",
+               printk(KERN_ERR "BTRFS: failed to read devices on %s\n",
                       sb->s_id);
                goto fail_tree_roots;
        }
@@ -2765,7 +2775,7 @@ retry_root_backup:
 
        ret = btrfs_recover_balance(fs_info);
        if (ret) {
-               printk(KERN_WARNING "BTRFS: failed to recover balance\n");
+               printk(KERN_ERR "BTRFS: failed to recover balance\n");
                goto fail_block_groups;
        }
 
@@ -3860,6 +3870,21 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
                                btrfs_super_log_root(sb));
 
+       /*
+        * Check the lower bound; the alignment and other constraints are
+        * checked later.
+        */
+       if (btrfs_super_nodesize(sb) < 4096) {
+               printk(KERN_ERR "BTRFS: nodesize too small: %u < 4096\n",
+                               btrfs_super_nodesize(sb));
+               ret = -EINVAL;
+       }
+       if (btrfs_super_sectorsize(sb) < 4096) {
+               printk(KERN_ERR "BTRFS: sectorsize too small: %u < 4096\n",
+                               btrfs_super_sectorsize(sb));
+               ret = -EINVAL;
+       }
+
        if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
                printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
                                fs_info->fsid, sb->dev_item.fsid);
@@ -3873,6 +3898,10 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
        if (btrfs_super_num_devices(sb) > (1UL << 31))
                printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
                                btrfs_super_num_devices(sb));
+       if (btrfs_super_num_devices(sb) == 0) {
+               printk(KERN_ERR "BTRFS: number of devices is 0\n");
+               ret = -EINVAL;
+       }
 
        if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
                printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
@@ -3880,6 +3909,25 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                ret = -EINVAL;
        }
 
+       /*
+        * Obvious sys_chunk_array corruptions: it must hold at least one key
+        * and one chunk
+        */
+       if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
+               printk(KERN_ERR "BTRFS: system chunk array too big %u > %u\n",
+                               btrfs_super_sys_array_size(sb),
+                               BTRFS_SYSTEM_CHUNK_ARRAY_SIZE);
+               ret = -EINVAL;
+       }
+       if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key)
+                       + sizeof(struct btrfs_chunk)) {
+               printk(KERN_ERR "BTRFS: system chunk array too small %u < %lu\n",
+                               btrfs_super_sys_array_size(sb),
+                               sizeof(struct btrfs_disk_key)
+                               + sizeof(struct btrfs_chunk));
+               ret = -EINVAL;
+       }
+
        /*
         * The generation is a global counter, we'll trust it more than the others
         * but it's still possible that it's the one that's wrong.
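
Taken together, the new checks in btrfs_check_super_valid() reject superblocks whose node or sector size is below 4096, whose device count is zero, or whose sys_chunk_array cannot hold even a single key plus chunk or overflows the fixed on-disk buffer. Below is a standalone sketch of those bounds; the two array-size constants are placeholders standing in for BTRFS_SYSTEM_CHUNK_ARRAY_SIZE and sizeof(struct btrfs_disk_key) + sizeof(struct btrfs_chunk), not their exact kernel values:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN_BLOCK_SIZE        4096u  /* lower bound added by the patch      */
    #define SYS_CHUNK_ARRAY_SIZE  2048u  /* placeholder for the on-disk buffer  */
    #define MIN_SYS_ARRAY_ENTRY     97u  /* placeholder for one key + one chunk */

    static int check_super(uint32_t nodesize, uint32_t sectorsize,
                           uint64_t num_devices, uint32_t sys_array_size)
    {
        if (nodesize < MIN_BLOCK_SIZE || sectorsize < MIN_BLOCK_SIZE)
            return -1;       /* too small to be a valid block size     */
        if (num_devices == 0)
            return -1;       /* a filesystem needs at least one device */
        if (sys_array_size > SYS_CHUNK_ARRAY_SIZE ||
            sys_array_size < MIN_SYS_ARRAY_ENTRY)
            return -1;       /* cannot hold even one (key, chunk) pair */
        return 0;
    }

    int main(void)
    {
        printf("%d\n", check_super(16384, 4096, 1, 512)); /*  0: accepted */
        printf("%d\n", check_super(2048, 4096, 1, 512));  /* -1: rejected */
        return 0;
    }
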
index 414651821fb3b38a62df3a72fa44877f66b2e602..27d44c0fd2364df69eafd15f61bd6a215696f91f 100644 (file)
@@ -46,11 +46,11 @@ struct btrfs_fs_devices;
 
 struct extent_buffer *read_tree_block(struct btrfs_root *root, u64 bytenr,
                                      u64 parent_transid);
-void readahead_tree_block(struct btrfs_root *root, u64 bytenr, u32 blocksize);
-int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr, u32 blocksize,
+void readahead_tree_block(struct btrfs_root *root, u64 bytenr);
+int reada_tree_block_flagged(struct btrfs_root *root, u64 bytenr,
                         int mirror_num, struct extent_buffer **eb);
 struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
-                                                  u64 bytenr, u32 blocksize);
+                                                  u64 bytenr);
 void clean_tree_block(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root, struct extent_buffer *buf);
 int open_ctree(struct super_block *sb,
index a684086c3c8123702cc41caa4d4dfe085aa7db3b..571f402d3fc46e5f0205451e85a7b78f3cc16b50 100644 (file)
@@ -74,8 +74,9 @@ enum {
        RESERVE_ALLOC_NO_ACCOUNT = 2,
 };
 
-static int update_block_group(struct btrfs_root *root,
-                             u64 bytenr, u64 num_bytes, int alloc);
+static int update_block_group(struct btrfs_trans_handle *trans,
+                             struct btrfs_root *root, u64 bytenr,
+                             u64 num_bytes, int alloc);
 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
@@ -1925,7 +1926,7 @@ int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
                         */
                        ret = 0;
                }
-               kfree(bbio);
+               btrfs_put_bbio(bbio);
        }
 
        if (actual_bytes)
@@ -2768,7 +2769,6 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_ref_head *head;
        int ret;
        int run_all = count == (unsigned long)-1;
-       int run_most = 0;
 
        /* We'll clean this up in btrfs_cleanup_transaction */
        if (trans->aborted)
@@ -2778,10 +2778,8 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
                root = root->fs_info->tree_root;
 
        delayed_refs = &trans->transaction->delayed_refs;
-       if (count == 0) {
+       if (count == 0)
                count = atomic_read(&delayed_refs->num_entries) * 2;
-               run_most = 1;
-       }
 
 again:
 #ifdef SCRAMBLE_DELAYED_REFS
@@ -3315,120 +3313,42 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
 {
        struct btrfs_block_group_cache *cache;
-       int err = 0;
+       struct btrfs_transaction *cur_trans = trans->transaction;
+       int ret = 0;
        struct btrfs_path *path;
-       u64 last = 0;
+
+       if (list_empty(&cur_trans->dirty_bgs))
+               return 0;
 
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
 
-again:
-       while (1) {
-               cache = btrfs_lookup_first_block_group(root->fs_info, last);
-               while (cache) {
-                       if (cache->disk_cache_state == BTRFS_DC_CLEAR)
-                               break;
-                       cache = next_block_group(root, cache);
-               }
-               if (!cache) {
-                       if (last == 0)
-                               break;
-                       last = 0;
-                       continue;
-               }
-               err = cache_save_setup(cache, trans, path);
-               last = cache->key.objectid + cache->key.offset;
-               btrfs_put_block_group(cache);
-       }
-
-       while (1) {
-               if (last == 0) {
-                       err = btrfs_run_delayed_refs(trans, root,
-                                                    (unsigned long)-1);
-                       if (err) /* File system offline */
-                               goto out;
-               }
-
-               cache = btrfs_lookup_first_block_group(root->fs_info, last);
-               while (cache) {
-                       if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
-                               btrfs_put_block_group(cache);
-                               goto again;
-                       }
-
-                       if (cache->dirty)
-                               break;
-                       cache = next_block_group(root, cache);
-               }
-               if (!cache) {
-                       if (last == 0)
-                               break;
-                       last = 0;
-                       continue;
-               }
-
-               if (cache->disk_cache_state == BTRFS_DC_SETUP)
-                       cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
-               cache->dirty = 0;
-               last = cache->key.objectid + cache->key.offset;
-
-               err = write_one_cache_group(trans, root, path, cache);
-               btrfs_put_block_group(cache);
-               if (err) /* File system offline */
-                       goto out;
-       }
-
-       while (1) {
-               /*
-                * I don't think this is needed since we're just marking our
-                * preallocated extent as written, but just in case it can't
-                * hurt.
-                */
-               if (last == 0) {
-                       err = btrfs_run_delayed_refs(trans, root,
-                                                    (unsigned long)-1);
-                       if (err) /* File system offline */
-                               goto out;
-               }
-
-               cache = btrfs_lookup_first_block_group(root->fs_info, last);
-               while (cache) {
-                       /*
-                        * Really this shouldn't happen, but it could if we
-                        * couldn't write the entire preallocated extent and
-                        * splitting the extent resulted in a new block.
-                        */
-                       if (cache->dirty) {
-                               btrfs_put_block_group(cache);
-                               goto again;
-                       }
-                       if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
-                               break;
-                       cache = next_block_group(root, cache);
-               }
-               if (!cache) {
-                       if (last == 0)
-                               break;
-                       last = 0;
-                       continue;
-               }
-
-               err = btrfs_write_out_cache(root, trans, cache, path);
-
-               /*
-                * If we didn't have an error then the cache state is still
-                * NEED_WRITE, so we can set it to WRITTEN.
-                */
-               if (!err && cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
-                       cache->disk_cache_state = BTRFS_DC_WRITTEN;
-               last = cache->key.objectid + cache->key.offset;
+       /*
+        * We don't need the lock here since we are protected by the transaction
+        * commit.  We want to do the cache_save_setup first and then run the
+        * delayed refs to make sure we have the best chance at doing this all
+        * in one shot.
+        */
+       while (!list_empty(&cur_trans->dirty_bgs)) {
+               cache = list_first_entry(&cur_trans->dirty_bgs,
+                                        struct btrfs_block_group_cache,
+                                        dirty_list);
+               list_del_init(&cache->dirty_list);
+               if (cache->disk_cache_state == BTRFS_DC_CLEAR)
+                       cache_save_setup(cache, trans, path);
+               if (!ret)
+                       ret = btrfs_run_delayed_refs(trans, root,
+                                                    (unsigned long) -1);
+               if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP)
+                       btrfs_write_out_cache(root, trans, cache, path);
+               if (!ret)
+                       ret = write_one_cache_group(trans, root, path, cache);
                btrfs_put_block_group(cache);
        }
-out:
 
        btrfs_free_path(path);
-       return err;
+       return ret;
 }
 
 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
@@ -5043,19 +4963,25 @@ void btrfs_subvolume_release_metadata(struct btrfs_root *root,
 /**
  * drop_outstanding_extent - drop an outstanding extent
  * @inode: the inode we're dropping the extent for
+ * @num_bytes: the number of bytes we're releasing.
  *
  * This is called when we are freeing up an outstanding extent, either called
  * after an error or after an extent is written.  This will return the number of
  * reserved extents that need to be freed.  This must be called with
  * BTRFS_I(inode)->lock held.
  */
-static unsigned drop_outstanding_extent(struct inode *inode)
+static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
 {
        unsigned drop_inode_space = 0;
        unsigned dropped_extents = 0;
+       unsigned num_extents = 0;
 
-       BUG_ON(!BTRFS_I(inode)->outstanding_extents);
-       BTRFS_I(inode)->outstanding_extents--;
+       num_extents = (unsigned)div64_u64(num_bytes +
+                                         BTRFS_MAX_EXTENT_SIZE - 1,
+                                         BTRFS_MAX_EXTENT_SIZE);
+       ASSERT(num_extents);
+       ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
+       BTRFS_I(inode)->outstanding_extents -= num_extents;
 
        if (BTRFS_I(inode)->outstanding_extents == 0 &&
            test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
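
With the new num_bytes argument, drop_outstanding_extent() can release several outstanding extents at once: the byte count is turned back into an extent count by rounding up against BTRFS_MAX_EXTENT_SIZE (128 MiB, judging by the writepage_delalloc() hunk later in this patch). A quick standalone check of that ceiling division:

    #include <stdint.h>
    #include <stdio.h>

    #define BTRFS_MAX_EXTENT_SIZE (128ULL * 1024 * 1024)   /* 128 MiB */

    /* Same rounding as div64_u64(num_bytes + BTRFS_MAX_EXTENT_SIZE - 1,
     *                            BTRFS_MAX_EXTENT_SIZE) in the hunk above. */
    static uint64_t extents_for(uint64_t num_bytes)
    {
        return (num_bytes + BTRFS_MAX_EXTENT_SIZE - 1) / BTRFS_MAX_EXTENT_SIZE;
    }

    int main(void)
    {
        printf("%llu\n", (unsigned long long)extents_for(4096));          /* 1 */
        printf("%llu\n", (unsigned long long)extents_for(128ULL << 20));  /* 1 */
        printf("%llu\n", (unsigned long long)extents_for(300ULL << 20));  /* 3 */
        return 0;
    }
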
@@ -5226,7 +5152,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
 out_fail:
        spin_lock(&BTRFS_I(inode)->lock);
-       dropped = drop_outstanding_extent(inode);
+       dropped = drop_outstanding_extent(inode, num_bytes);
        /*
         * If the inodes csum_bytes is the same as the original
         * csum_bytes then we know we haven't raced with any free()ers
@@ -5305,7 +5231,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 
        num_bytes = ALIGN(num_bytes, root->sectorsize);
        spin_lock(&BTRFS_I(inode)->lock);
-       dropped = drop_outstanding_extent(inode);
+       dropped = drop_outstanding_extent(inode, num_bytes);
 
        if (num_bytes)
                to_free = calc_csum_metadata_size(inode, num_bytes, 0);
@@ -5375,8 +5301,9 @@ void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
        btrfs_free_reserved_data_space(inode, num_bytes);
 }
 
-static int update_block_group(struct btrfs_root *root,
-                             u64 bytenr, u64 num_bytes, int alloc)
+static int update_block_group(struct btrfs_trans_handle *trans,
+                             struct btrfs_root *root, u64 bytenr,
+                             u64 num_bytes, int alloc)
 {
        struct btrfs_block_group_cache *cache = NULL;
        struct btrfs_fs_info *info = root->fs_info;
@@ -5414,6 +5341,14 @@ static int update_block_group(struct btrfs_root *root,
                if (!alloc && cache->cached == BTRFS_CACHE_NO)
                        cache_block_group(cache, 1);
 
+               spin_lock(&trans->transaction->dirty_bgs_lock);
+               if (list_empty(&cache->dirty_list)) {
+                       list_add_tail(&cache->dirty_list,
+                                     &trans->transaction->dirty_bgs);
+                       btrfs_get_block_group(cache);
+               }
+               spin_unlock(&trans->transaction->dirty_bgs_lock);
+
                byte_in_group = bytenr - cache->key.objectid;
                WARN_ON(byte_in_group > cache->key.offset);
 
@@ -5424,7 +5359,6 @@ static int update_block_group(struct btrfs_root *root,
                    cache->disk_cache_state < BTRFS_DC_CLEAR)
                        cache->disk_cache_state = BTRFS_DC_CLEAR;
 
-               cache->dirty = 1;
                old_val = btrfs_block_group_used(&cache->item);
                num_bytes = min(total, cache->key.offset - byte_in_group);
                if (alloc) {
@@ -5807,10 +5741,13 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
                unpin = &fs_info->freed_extents[0];
 
        while (1) {
+               mutex_lock(&fs_info->unused_bg_unpin_mutex);
                ret = find_first_extent_bit(unpin, 0, &start, &end,
                                            EXTENT_DIRTY, NULL);
-               if (ret)
+               if (ret) {
+                       mutex_unlock(&fs_info->unused_bg_unpin_mutex);
                        break;
+               }
 
                if (btrfs_test_opt(root, DISCARD))
                        ret = btrfs_discard_extent(root, start,
@@ -5818,6 +5755,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 
                clear_extent_dirty(unpin, start, end, GFP_NOFS);
                unpin_extent_range(root, start, end, true);
+               mutex_unlock(&fs_info->unused_bg_unpin_mutex);
                cond_resched();
        }
 
@@ -6103,7 +6041,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                        }
                }
 
-               ret = update_block_group(root, bytenr, num_bytes, 0);
+               ret = update_block_group(trans, root, bytenr, num_bytes, 0);
                if (ret) {
                        btrfs_abort_transaction(trans, extent_root, ret);
                        goto out;
@@ -6205,7 +6143,6 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                           struct extent_buffer *buf,
                           u64 parent, int last_ref)
 {
-       struct btrfs_block_group_cache *cache = NULL;
        int pin = 1;
        int ret;
 
@@ -6221,17 +6158,20 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
        if (!last_ref)
                return;
 
-       cache = btrfs_lookup_block_group(root->fs_info, buf->start);
-
        if (btrfs_header_generation(buf) == trans->transid) {
+               struct btrfs_block_group_cache *cache;
+
                if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
                        ret = check_ref_cleanup(trans, root, buf->start);
                        if (!ret)
                                goto out;
                }
 
+               cache = btrfs_lookup_block_group(root->fs_info, buf->start);
+
                if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
                        pin_down_extent(root, cache, buf->start, buf->len, 1);
+                       btrfs_put_block_group(cache);
                        goto out;
                }
 
@@ -6239,6 +6179,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 
                btrfs_add_free_space(cache, buf->start, buf->len);
                btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
+               btrfs_put_block_group(cache);
                trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
                pin = 0;
        }
@@ -6253,7 +6194,6 @@ out:
         * anymore.
         */
        clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
-       btrfs_put_block_group(cache);
 }
 
 /* Can return -ENOMEM */
@@ -7063,7 +7003,7 @@ static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
        if (ret)
                return ret;
 
-       ret = update_block_group(root, ins->objectid, ins->offset, 1);
+       ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
        if (ret) { /* -ENOENT, logic error */
                btrfs_err(fs_info, "update block group failed for %llu %llu",
                        ins->objectid, ins->offset);
@@ -7152,7 +7092,8 @@ static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                        return ret;
        }
 
-       ret = update_block_group(root, ins->objectid, root->nodesize, 1);
+       ret = update_block_group(trans, root, ins->objectid, root->nodesize,
+                                1);
        if (ret) { /* -ENOENT, logic error */
                btrfs_err(fs_info, "update block group failed for %llu %llu",
                        ins->objectid, ins->offset);
@@ -7217,11 +7158,11 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 
 static struct extent_buffer *
 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                     u64 bytenr, u32 blocksize, int level)
+                     u64 bytenr, int level)
 {
        struct extent_buffer *buf;
 
-       buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
+       buf = btrfs_find_create_tree_block(root, bytenr);
        if (!buf)
                return ERR_PTR(-ENOMEM);
        btrfs_set_header_generation(buf, trans->transid);
@@ -7340,7 +7281,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
 
        if (btrfs_test_is_dummy_root(root)) {
                buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
-                                           blocksize, level);
+                                           level);
                if (!IS_ERR(buf))
                        root->alloc_bytenr += blocksize;
                return buf;
@@ -7357,8 +7298,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
                return ERR_PTR(ret);
        }
 
-       buf = btrfs_init_new_buffer(trans, root, ins.objectid,
-                                   blocksize, level);
+       buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
        BUG_ON(IS_ERR(buf)); /* -ENOMEM */
 
        if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
@@ -7487,7 +7427,7 @@ static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
                                continue;
                }
 reada:
-               readahead_tree_block(root, bytenr, blocksize);
+               readahead_tree_block(root, bytenr);
                nread++;
        }
        wc->reada_slot = slot;
@@ -7828,7 +7768,7 @@ static noinline int do_walk_down(struct btrfs_trans_handle *trans,
 
        next = btrfs_find_tree_block(root, bytenr);
        if (!next) {
-               next = btrfs_find_create_tree_block(root, bytenr, blocksize);
+               next = btrfs_find_create_tree_block(root, bytenr);
                if (!next)
                        return -ENOMEM;
                btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
@@ -8548,14 +8488,6 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
        if (IS_ERR(trans))
                return PTR_ERR(trans);
 
-       alloc_flags = update_block_group_flags(root, cache->flags);
-       if (alloc_flags != cache->flags) {
-               ret = do_chunk_alloc(trans, root, alloc_flags,
-                                    CHUNK_ALLOC_FORCE);
-               if (ret < 0)
-                       goto out;
-       }
-
        ret = set_block_group_ro(cache, 0);
        if (!ret)
                goto out;
@@ -8566,6 +8498,11 @@ int btrfs_set_block_group_ro(struct btrfs_root *root,
                goto out;
        ret = set_block_group_ro(cache, 0);
 out:
+       if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
+               alloc_flags = update_block_group_flags(root, cache->flags);
+               check_system_chunk(trans, root, alloc_flags);
+       }
+
        btrfs_end_transaction(trans, root);
        return ret;
 }
@@ -9005,6 +8942,7 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
        INIT_LIST_HEAD(&cache->cluster_list);
        INIT_LIST_HEAD(&cache->bg_list);
        INIT_LIST_HEAD(&cache->ro_list);
+       INIT_LIST_HEAD(&cache->dirty_list);
        btrfs_init_free_space_ctl(cache);
        atomic_set(&cache->trimming, 0);
 
@@ -9068,9 +9006,8 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                         * b) Setting 'dirty flag' makes sure that we flush
                         *    the new space cache info onto disk.
                         */
-                       cache->disk_cache_state = BTRFS_DC_CLEAR;
                        if (btrfs_test_opt(root, SPACE_CACHE))
-                               cache->dirty = 1;
+                               cache->disk_cache_state = BTRFS_DC_CLEAR;
                }
 
                read_extent_buffer(leaf, &cache->item,
@@ -9460,6 +9397,13 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                }
        }
 
+       spin_lock(&trans->transaction->dirty_bgs_lock);
+       if (!list_empty(&block_group->dirty_list)) {
+               list_del_init(&block_group->dirty_list);
+               btrfs_put_block_group(block_group);
+       }
+       spin_unlock(&trans->transaction->dirty_bgs_lock);
+
        btrfs_remove_free_space_cache(block_group);
 
        spin_lock(&block_group->space_info->lock);
@@ -9611,7 +9555,8 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
                 * Want to do this before we do anything else so we can recover
                 * properly if we fail to join the transaction.
                 */
-               trans = btrfs_join_transaction(root);
+               /* 1 for btrfs_orphan_reserve_metadata() */
+               trans = btrfs_start_transaction(root, 1);
                if (IS_ERR(trans)) {
                        btrfs_set_block_group_rw(root, block_group);
                        ret = PTR_ERR(trans);
@@ -9624,18 +9569,33 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
                 */
                start = block_group->key.objectid;
                end = start + block_group->key.offset - 1;
+               /*
+                * Hold the unused_bg_unpin_mutex lock to avoid racing with
+                * btrfs_finish_extent_commit(). If we are at transaction N,
+                * another task might be running finish_extent_commit() for the
+                * previous transaction N - 1, and have seen a range belonging
+                * to the block group in freed_extents[] before we were able to
+                * clear the whole block group range from freed_extents[]. This
+                * means that task can lookup for the block group after we
+                * means that task can look up the block group after we
+                * a BUG_ON() at btrfs_unpin_extent_range().
+                */
+               mutex_lock(&fs_info->unused_bg_unpin_mutex);
                ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
                                  EXTENT_DIRTY, GFP_NOFS);
                if (ret) {
+                       mutex_unlock(&fs_info->unused_bg_unpin_mutex);
                        btrfs_set_block_group_rw(root, block_group);
                        goto end_trans;
                }
                ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
                                  EXTENT_DIRTY, GFP_NOFS);
                if (ret) {
+                       mutex_unlock(&fs_info->unused_bg_unpin_mutex);
                        btrfs_set_block_group_rw(root, block_group);
                        goto end_trans;
                }
+               mutex_unlock(&fs_info->unused_bg_unpin_mutex);
 
                /* Reset pinned so btrfs_put_block_group doesn't complain */
                block_group->pinned = 0;
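
The long comment above is the heart of this hunk: unused_bg_unpin_mutex serializes block-group removal against the unpin loop in btrfs_finish_extent_commit() shown earlier. Schematically (condensed from the two hunks, not compilable on its own):

    /* btrfs_finish_extent_commit(): unpin one range at a time under the mutex */
    mutex_lock(&fs_info->unused_bg_unpin_mutex);
    ret = find_first_extent_bit(unpin, 0, &start, &end, EXTENT_DIRTY, NULL);
    if (!ret) {
            clear_extent_dirty(unpin, start, end, GFP_NOFS);
            unpin_extent_range(root, start, end, true);
    }
    mutex_unlock(&fs_info->unused_bg_unpin_mutex);

    /* btrfs_delete_unused_bgs(): clear the whole block group range while
     * holding the same mutex, so the loop above can never pick up a stale
     * range after the block group has been removed. */
    mutex_lock(&fs_info->unused_bg_unpin_mutex);
    clear_extent_bits(&fs_info->freed_extents[0], start, end, EXTENT_DIRTY, GFP_NOFS);
    clear_extent_bits(&fs_info->freed_extents[1], start, end, EXTENT_DIRTY, GFP_NOFS);
    mutex_unlock(&fs_info->unused_bg_unpin_mutex);
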
index c73df6a7c9b6ce0b8ee9beec01f4f0fe48ba5705..c7233ff1d533b653b8b9e7f29e022e9126e5f38a 100644 (file)
@@ -64,7 +64,7 @@ void btrfs_leak_debug_check(void)
 
        while (!list_empty(&states)) {
                state = list_entry(states.next, struct extent_state, leak_list);
-               pr_err("BTRFS: state leak: start %llu end %llu state %lu in tree %d refs %d\n",
+               pr_err("BTRFS: state leak: start %llu end %llu state %u in tree %d refs %d\n",
                       state->start, state->end, state->state,
                       extent_state_in_tree(state),
                       atomic_read(&state->refs));
@@ -396,21 +396,21 @@ static void merge_state(struct extent_io_tree *tree,
 }
 
 static void set_state_cb(struct extent_io_tree *tree,
-                        struct extent_state *state, unsigned long *bits)
+                        struct extent_state *state, unsigned *bits)
 {
        if (tree->ops && tree->ops->set_bit_hook)
                tree->ops->set_bit_hook(tree->mapping->host, state, bits);
 }
 
 static void clear_state_cb(struct extent_io_tree *tree,
-                          struct extent_state *state, unsigned long *bits)
+                          struct extent_state *state, unsigned *bits)
 {
        if (tree->ops && tree->ops->clear_bit_hook)
                tree->ops->clear_bit_hook(tree->mapping->host, state, bits);
 }
 
 static void set_state_bits(struct extent_io_tree *tree,
-                          struct extent_state *state, unsigned long *bits);
+                          struct extent_state *state, unsigned *bits);
 
 /*
  * insert an extent_state struct into the tree.  'bits' are set on the
@@ -426,7 +426,7 @@ static int insert_state(struct extent_io_tree *tree,
                        struct extent_state *state, u64 start, u64 end,
                        struct rb_node ***p,
                        struct rb_node **parent,
-                       unsigned long *bits)
+                       unsigned *bits)
 {
        struct rb_node *node;
 
@@ -511,10 +511,10 @@ static struct extent_state *next_state(struct extent_state *state)
  */
 static struct extent_state *clear_state_bit(struct extent_io_tree *tree,
                                            struct extent_state *state,
-                                           unsigned long *bits, int wake)
+                                           unsigned *bits, int wake)
 {
        struct extent_state *next;
-       unsigned long bits_to_clear = *bits & ~EXTENT_CTLBITS;
+       unsigned bits_to_clear = *bits & ~EXTENT_CTLBITS;
 
        if ((bits_to_clear & EXTENT_DIRTY) && (state->state & EXTENT_DIRTY)) {
                u64 range = state->end - state->start + 1;
@@ -570,7 +570,7 @@ static void extent_io_tree_panic(struct extent_io_tree *tree, int err)
  * This takes the tree lock, and returns 0 on success and < 0 on error.
  */
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                    unsigned long bits, int wake, int delete,
+                    unsigned bits, int wake, int delete,
                     struct extent_state **cached_state,
                     gfp_t mask)
 {
@@ -789,9 +789,9 @@ out:
 
 static void set_state_bits(struct extent_io_tree *tree,
                           struct extent_state *state,
-                          unsigned long *bits)
+                          unsigned *bits)
 {
-       unsigned long bits_to_set = *bits & ~EXTENT_CTLBITS;
+       unsigned bits_to_set = *bits & ~EXTENT_CTLBITS;
 
        set_state_cb(tree, state, bits);
        if ((bits_to_set & EXTENT_DIRTY) && !(state->state & EXTENT_DIRTY)) {
@@ -803,7 +803,7 @@ static void set_state_bits(struct extent_io_tree *tree,
 
 static void cache_state_if_flags(struct extent_state *state,
                                 struct extent_state **cached_ptr,
-                                const u64 flags)
+                                unsigned flags)
 {
        if (cached_ptr && !(*cached_ptr)) {
                if (!flags || (state->state & flags)) {
@@ -833,7 +833,7 @@ static void cache_state(struct extent_state *state,
 
 static int __must_check
 __set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                unsigned long bits, unsigned long exclusive_bits,
+                unsigned bits, unsigned exclusive_bits,
                 u64 *failed_start, struct extent_state **cached_state,
                 gfp_t mask)
 {
@@ -1034,7 +1034,7 @@ search_again:
 }
 
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                  unsigned long bits, u64 * failed_start,
+                  unsigned bits, u64 * failed_start,
                   struct extent_state **cached_state, gfp_t mask)
 {
        return __set_extent_bit(tree, start, end, bits, 0, failed_start,
@@ -1060,7 +1060,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
  * boundary bits like LOCK.
  */
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                      unsigned long bits, unsigned long clear_bits,
+                      unsigned bits, unsigned clear_bits,
                       struct extent_state **cached_state, gfp_t mask)
 {
        struct extent_state *state;
@@ -1268,14 +1268,14 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 }
 
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-                   unsigned long bits, gfp_t mask)
+                   unsigned bits, gfp_t mask)
 {
        return set_extent_bit(tree, start, end, bits, NULL,
                              NULL, mask);
 }
 
 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-                     unsigned long bits, gfp_t mask)
+                     unsigned bits, gfp_t mask)
 {
        return clear_extent_bit(tree, start, end, bits, 0, 0, NULL, mask);
 }
@@ -1330,10 +1330,11 @@ int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
  * us if waiting is desired.
  */
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-                    unsigned long bits, struct extent_state **cached_state)
+                    unsigned bits, struct extent_state **cached_state)
 {
        int err;
        u64 failed_start;
+
        while (1) {
                err = __set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
                                       EXTENT_LOCKED, &failed_start,
@@ -1440,7 +1441,7 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
  */
 static struct extent_state *
 find_first_extent_bit_state(struct extent_io_tree *tree,
-                           u64 start, unsigned long bits)
+                           u64 start, unsigned bits)
 {
        struct rb_node *node;
        struct extent_state *state;
@@ -1474,7 +1475,7 @@ out:
  * If nothing was found, 1 is returned. If found something, return 0.
  */
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
-                         u64 *start_ret, u64 *end_ret, unsigned long bits,
+                         u64 *start_ret, u64 *end_ret, unsigned bits,
                          struct extent_state **cached_state)
 {
        struct extent_state *state;
@@ -1753,7 +1754,7 @@ out_failed:
 
 int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
                                 struct page *locked_page,
-                                unsigned long clear_bits,
+                                unsigned clear_bits,
                                 unsigned long page_ops)
 {
        struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
@@ -1810,7 +1811,7 @@ int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
  */
 u64 count_range_bits(struct extent_io_tree *tree,
                     u64 *start, u64 search_end, u64 max_bytes,
-                    unsigned long bits, int contig)
+                    unsigned bits, int contig)
 {
        struct rb_node *node;
        struct extent_state *state;
@@ -1928,7 +1929,7 @@ out:
  * range is found set.
  */
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                  unsigned long bits, int filled, struct extent_state *cached)
+                  unsigned bits, int filled, struct extent_state *cached)
 {
        struct extent_state *state = NULL;
        struct rb_node *node;
@@ -2057,7 +2058,7 @@ int repair_io_failure(struct inode *inode, u64 start, u64 length, u64 logical,
        sector = bbio->stripes[mirror_num-1].physical >> 9;
        bio->bi_iter.bi_sector = sector;
        dev = bbio->stripes[mirror_num-1].dev;
-       kfree(bbio);
+       btrfs_put_bbio(bbio);
        if (!dev || !dev->bdev || !dev->writeable) {
                bio_put(bio);
                return -EIO;
@@ -2816,8 +2817,10 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
                    bio_add_page(bio, page, page_size, offset) < page_size) {
                        ret = submit_one_bio(rw, bio, mirror_num,
                                             prev_bio_flags);
-                       if (ret < 0)
+                       if (ret < 0) {
+                               *bio_ret = NULL;
                                return ret;
+                       }
                        bio = NULL;
                } else {
                        return 0;
@@ -3239,7 +3242,7 @@ static noinline_for_stack int writepage_delalloc(struct inode *inode,
                                               page,
                                               &delalloc_start,
                                               &delalloc_end,
-                                              128 * 1024 * 1024);
+                                              BTRFS_MAX_EXTENT_SIZE);
                if (nr_delalloc == 0) {
                        delalloc_start = delalloc_end + 1;
                        continue;
@@ -4598,11 +4601,11 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb)
 
 static struct extent_buffer *
 __alloc_extent_buffer(struct btrfs_fs_info *fs_info, u64 start,
-                     unsigned long len, gfp_t mask)
+                     unsigned long len)
 {
        struct extent_buffer *eb = NULL;
 
-       eb = kmem_cache_zalloc(extent_buffer_cache, mask);
+       eb = kmem_cache_zalloc(extent_buffer_cache, GFP_NOFS);
        if (eb == NULL)
                return NULL;
        eb->start = start;
@@ -4643,7 +4646,7 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
        struct extent_buffer *new;
        unsigned long num_pages = num_extent_pages(src->start, src->len);
 
-       new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_NOFS);
+       new = __alloc_extent_buffer(src->fs_info, src->start, src->len);
        if (new == NULL)
                return NULL;
 
@@ -4666,13 +4669,26 @@ struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
        return new;
 }
 
-struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
+struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
+                                               u64 start)
 {
        struct extent_buffer *eb;
-       unsigned long num_pages = num_extent_pages(0, len);
+       unsigned long len;
+       unsigned long num_pages;
        unsigned long i;
 
-       eb = __alloc_extent_buffer(NULL, start, len, GFP_NOFS);
+       if (!fs_info) {
+               /*
+                * Called only from tests that don't always have a fs_info
+                * available, but we know that nodesize is 4096
+                */
+               len = 4096;
+       } else {
+               len = fs_info->tree_root->nodesize;
+       }
+       num_pages = num_extent_pages(0, len);
+
+       eb = __alloc_extent_buffer(fs_info, start, len);
        if (!eb)
                return NULL;
 
@@ -4762,7 +4778,7 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
 
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
-                                              u64 start, unsigned long len)
+                                              u64 start)
 {
        struct extent_buffer *eb, *exists = NULL;
        int ret;
@@ -4770,7 +4786,7 @@ struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
        eb = find_extent_buffer(fs_info, start);
        if (eb)
                return eb;
-       eb = alloc_dummy_extent_buffer(start, len);
+       eb = alloc_dummy_extent_buffer(fs_info, start);
        if (!eb)
                return NULL;
        eb->fs_info = fs_info;
@@ -4808,8 +4824,9 @@ free_eb:
 #endif
 
 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
-                                         u64 start, unsigned long len)
+                                         u64 start)
 {
+       unsigned long len = fs_info->tree_root->nodesize;
        unsigned long num_pages = num_extent_pages(start, len);
        unsigned long i;
        unsigned long index = start >> PAGE_CACHE_SHIFT;
@@ -4824,7 +4841,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
        if (eb)
                return eb;
 
-       eb = __alloc_extent_buffer(fs_info, start, len, GFP_NOFS);
+       eb = __alloc_extent_buffer(fs_info, start, len);
        if (!eb)
                return NULL;
 
index ece9ce87edff521fa0a54f38bca0cb47059c638b..695b0ccfb7553e786f4c9634d3a86c0263c86321 100644 (file)
@@ -4,22 +4,22 @@
 #include <linux/rbtree.h>
 
 /* bits for the extent state */
-#define EXTENT_DIRTY 1
-#define EXTENT_WRITEBACK (1 << 1)
-#define EXTENT_UPTODATE (1 << 2)
-#define EXTENT_LOCKED (1 << 3)
-#define EXTENT_NEW (1 << 4)
-#define EXTENT_DELALLOC (1 << 5)
-#define EXTENT_DEFRAG (1 << 6)
-#define EXTENT_BOUNDARY (1 << 9)
-#define EXTENT_NODATASUM (1 << 10)
-#define EXTENT_DO_ACCOUNTING (1 << 11)
-#define EXTENT_FIRST_DELALLOC (1 << 12)
-#define EXTENT_NEED_WAIT (1 << 13)
-#define EXTENT_DAMAGED (1 << 14)
-#define EXTENT_NORESERVE (1 << 15)
-#define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
-#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
+#define EXTENT_DIRTY           (1U << 0)
+#define EXTENT_WRITEBACK       (1U << 1)
+#define EXTENT_UPTODATE                (1U << 2)
+#define EXTENT_LOCKED          (1U << 3)
+#define EXTENT_NEW             (1U << 4)
+#define EXTENT_DELALLOC                (1U << 5)
+#define EXTENT_DEFRAG          (1U << 6)
+#define EXTENT_BOUNDARY                (1U << 9)
+#define EXTENT_NODATASUM       (1U << 10)
+#define EXTENT_DO_ACCOUNTING   (1U << 11)
+#define EXTENT_FIRST_DELALLOC  (1U << 12)
+#define EXTENT_NEED_WAIT       (1U << 13)
+#define EXTENT_DAMAGED         (1U << 14)
+#define EXTENT_NORESERVE       (1U << 15)
+#define EXTENT_IOBITS          (EXTENT_LOCKED | EXTENT_WRITEBACK)
+#define EXTENT_CTLBITS         (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
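
All of the state bits above fit in the low 16 bits, which is why the accompanying hunks shrink the bit arguments and struct extent_state::state from unsigned long to unsigned; spelling the constants as 1U << n keeps them unsigned ints rather than plain ints. A trivial standalone illustration of the same flag handling in a 32-bit word:

    #include <assert.h>
    #include <stdio.h>

    #define EXTENT_DIRTY   (1U << 0)
    #define EXTENT_LOCKED  (1U << 3)

    int main(void)
    {
        unsigned state = 0;                  /* 32 bits is plenty for bits 0..15 */

        state |= EXTENT_DIRTY | EXTENT_LOCKED;
        assert(state & EXTENT_LOCKED);
        state &= ~EXTENT_DIRTY;
        printf("state=0x%x\n", state);       /* prints state=0x8 */
        return 0;
    }
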
 
 /*
  * flags for bio submission. The high bits indicate the compression
@@ -81,9 +81,9 @@ struct extent_io_ops {
        int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
                                      struct extent_state *state, int uptodate);
        void (*set_bit_hook)(struct inode *inode, struct extent_state *state,
-                            unsigned long *bits);
+                            unsigned *bits);
        void (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
-                              unsigned long *bits);
+                              unsigned *bits);
        void (*merge_extent_hook)(struct inode *inode,
                                  struct extent_state *new,
                                  struct extent_state *other);
@@ -108,7 +108,7 @@ struct extent_state {
        /* ADD NEW ELEMENTS AFTER THIS */
        wait_queue_head_t wq;
        atomic_t refs;
-       unsigned long state;
+       unsigned state;
 
        /* for use by the FS */
        u64 private;
@@ -188,7 +188,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
 int try_release_extent_buffer(struct page *page);
 int lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-                    unsigned long bits, struct extent_state **cached);
+                    unsigned bits, struct extent_state **cached);
 int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end);
 int unlock_extent_cached(struct extent_io_tree *tree, u64 start, u64 end,
                         struct extent_state **cached, gfp_t mask);
@@ -202,21 +202,21 @@ void extent_io_exit(void);
 
 u64 count_range_bits(struct extent_io_tree *tree,
                     u64 *start, u64 search_end,
-                    u64 max_bytes, unsigned long bits, int contig);
+                    u64 max_bytes, unsigned bits, int contig);
 
 void free_extent_state(struct extent_state *state);
 int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                  unsigned long bits, int filled,
+                  unsigned bits, int filled,
                   struct extent_state *cached_state);
 int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-                     unsigned long bits, gfp_t mask);
+                     unsigned bits, gfp_t mask);
 int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                    unsigned long bits, int wake, int delete,
+                    unsigned bits, int wake, int delete,
                     struct extent_state **cached, gfp_t mask);
 int set_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
-                   unsigned long bits, gfp_t mask);
+                   unsigned bits, gfp_t mask);
 int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                  unsigned long bits, u64 *failed_start,
+                  unsigned bits, u64 *failed_start,
                   struct extent_state **cached_state, gfp_t mask);
 int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
                        struct extent_state **cached_state, gfp_t mask);
@@ -229,14 +229,14 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
 int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
                       gfp_t mask);
 int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                      unsigned long bits, unsigned long clear_bits,
+                      unsigned bits, unsigned clear_bits,
                       struct extent_state **cached_state, gfp_t mask);
 int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
                        struct extent_state **cached_state, gfp_t mask);
 int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
                      struct extent_state **cached_state, gfp_t mask);
 int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
-                         u64 *start_ret, u64 *end_ret, unsigned long bits,
+                         u64 *start_ret, u64 *end_ret, unsigned bits,
                          struct extent_state **cached_state);
 int extent_invalidatepage(struct extent_io_tree *tree,
                          struct page *page, unsigned long offset);
@@ -262,8 +262,9 @@ int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private);
 void set_page_extent_mapped(struct page *page);
 
 struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
-                                         u64 start, unsigned long len);
-struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len);
+                                         u64 start);
+struct extent_buffer *alloc_dummy_extent_buffer(struct btrfs_fs_info *fs_info,
+               u64 start);
 struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
 struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
                                         u64 start);
@@ -322,7 +323,7 @@ int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end);
 int extent_range_redirty_for_io(struct inode *inode, u64 start, u64 end);
 int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
                                 struct page *locked_page,
-                                unsigned long bits_to_clear,
+                                unsigned bits_to_clear,
                                 unsigned long page_ops);
 struct bio *
 btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
@@ -377,5 +378,5 @@ noinline u64 find_lock_delalloc_range(struct inode *inode,
                                      u64 *end, u64 max_bytes);
 #endif
 struct extent_buffer *alloc_test_extent_buffer(struct btrfs_fs_info *fs_info,
-                                              u64 start, unsigned long len);
+                                              u64 start);
 #endif
index d6c03f7f136b359c534668a38f9e9a72d299eb66..a71978578fa71a11d83f8c435541b807eedc2b51 100644 (file)
@@ -651,15 +651,13 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
        struct io_ctl io_ctl;
        struct btrfs_key key;
        struct btrfs_free_space *e, *n;
-       struct list_head bitmaps;
+       LIST_HEAD(bitmaps);
        u64 num_entries;
        u64 num_bitmaps;
        u64 generation;
        u8 type;
        int ret = 0;
 
-       INIT_LIST_HEAD(&bitmaps);
-
        /* Nothing in the space cache, goodbye */
        if (!i_size_read(inode))
                return 0;
@@ -1243,6 +1241,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct inode *inode;
        int ret = 0;
+       enum btrfs_disk_cache_state dcs = BTRFS_DC_WRITTEN;
 
        root = root->fs_info->tree_root;
 
@@ -1266,9 +1265,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
        ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
                                      path, block_group->key.objectid);
        if (ret) {
-               spin_lock(&block_group->lock);
-               block_group->disk_cache_state = BTRFS_DC_ERROR;
-               spin_unlock(&block_group->lock);
+               dcs = BTRFS_DC_ERROR;
                ret = 0;
 #ifdef DEBUG
                btrfs_err(root->fs_info,
@@ -1277,6 +1274,9 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 #endif
        }
 
+       spin_lock(&block_group->lock);
+       block_group->disk_cache_state = dcs;
+       spin_unlock(&block_group->lock);
        iput(inode);
        return ret;
 }
@@ -2903,7 +2903,6 @@ int btrfs_find_space_cluster(struct btrfs_root *root,
        trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
                                 min_bytes);
 
-       INIT_LIST_HEAD(&bitmaps);
        ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
                                      bytes + empty_size,
                                      cont1_bytes, min_bytes);
index 8ffa4783cbf438ed182e6f94e39711ded5207558..265e03c73f4daaea4ac1b69ab0a606c63f50895f 100644 (file)
@@ -344,6 +344,7 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
                return -ENOMEM;
 
        path->leave_spinning = 1;
+       path->skip_release_on_error = 1;
        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                      ins_len);
        if (ret == -EEXIST) {
@@ -362,8 +363,12 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
                ptr = (unsigned long)(ref + 1);
                ret = 0;
        } else if (ret < 0) {
-               if (ret == -EOVERFLOW)
-                       ret = -EMLINK;
+               if (ret == -EOVERFLOW) {
+                       if (find_name_in_backref(path, name, name_len, &ref))
+                               ret = -EEXIST;
+                       else
+                               ret = -EMLINK;
+               }
                goto out;
        } else {
                ref = btrfs_item_ptr(path->nodes[0], path->slots[0],
index 54bcf639d1cf1f48df951477b16e6266402f7bc6..a85c23dfcddbcfd992069811f24b116d74e4dc21 100644 (file)
@@ -1530,10 +1530,45 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
 static void btrfs_split_extent_hook(struct inode *inode,
                                    struct extent_state *orig, u64 split)
 {
+       u64 size;
+
        /* not delalloc, ignore it */
        if (!(orig->state & EXTENT_DELALLOC))
                return;
 
+       size = orig->end - orig->start + 1;
+       if (size > BTRFS_MAX_EXTENT_SIZE) {
+               u64 num_extents;
+               u64 new_size;
+
+               /*
+                * We need the largest size of the remaining extent to see if we
+                * need to add a new outstanding extent.  Think of the following
+                * case:
+                *
+                * [BTRFS_MAX_EXTENT_SIZE * 2 - 4k][4k]
+                *
+                * The new_size would just be 4k and we'd think we had enough
+                * outstanding extents for this if we only took one side of the
+                * split; the same goes for the other direction.  We need to see
+                * if the larger size still covers the same number of extents as
+                * the original size, because if it does we need to add a new
+                * outstanding extent.  But if we split up and the larger size
+                * is less than the original, then we are good to go since we've
+                * already accounted for the extra extent in our original
+                * accounting.
+                */
+               new_size = orig->end - split + 1;
+               if ((split - orig->start) > new_size)
+                       new_size = split - orig->start;
+
+               num_extents = div64_u64(size + BTRFS_MAX_EXTENT_SIZE - 1,
+                                       BTRFS_MAX_EXTENT_SIZE);
+               if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
+                             BTRFS_MAX_EXTENT_SIZE) < num_extents)
+                       return;
+       }
+
        spin_lock(&BTRFS_I(inode)->lock);
        BTRFS_I(inode)->outstanding_extents++;
        spin_unlock(&BTRFS_I(inode)->lock);
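The hook above boils down to a ceiling division of the delalloc range by BTRFS_MAX_EXTENT_SIZE. A user-space sketch of that arithmetic, assuming an illustrative 128 MiB stand-in for the real constant (DEMO_MAX_EXTENT_SIZE and nr_extents() are made up for the example):

#include <stdio.h>
#include <stdint.h>

/* Stand-in for BTRFS_MAX_EXTENT_SIZE; the value is only illustrative. */
#define DEMO_MAX_EXTENT_SIZE (128ULL * 1024 * 1024)

/* Same shape as the div64_u64() rounding above: ceil(bytes / max). */
static uint64_t nr_extents(uint64_t bytes)
{
	return (bytes + DEMO_MAX_EXTENT_SIZE - 1) / DEMO_MAX_EXTENT_SIZE;
}

int main(void)
{
	/* The [max * 2 - 4k][4k] case from the comment, with start == 0. */
	uint64_t size = 2 * DEMO_MAX_EXTENT_SIZE;
	uint64_t split = size - 4096;
	uint64_t new_size = size - split;	/* right-hand piece: 4k */

	if (split > new_size)			/* keep the larger half */
		new_size = split;

	if (nr_extents(new_size) < nr_extents(size))
		printf("larger half shrank, no extra reservation needed\n");
	else
		printf("still %llu max-sized extents, add one outstanding extent\n",
		       (unsigned long long)nr_extents(new_size));
	return 0;
}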
@@ -1549,10 +1584,34 @@ static void btrfs_merge_extent_hook(struct inode *inode,
                                    struct extent_state *new,
                                    struct extent_state *other)
 {
+       u64 new_size, old_size;
+       u64 num_extents;
+
        /* not delalloc, ignore it */
        if (!(other->state & EXTENT_DELALLOC))
                return;
 
+       old_size = other->end - other->start + 1;
+       new_size = old_size + (new->end - new->start + 1);
+
+       /* we're not bigger than the max, unreserve the space and go */
+       if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
+               spin_lock(&BTRFS_I(inode)->lock);
+               BTRFS_I(inode)->outstanding_extents--;
+               spin_unlock(&BTRFS_I(inode)->lock);
+               return;
+       }
+
+       /*
+        * If we grew by another max_extent, just return; we want to keep that
+        * reserved amount.
+        */
+       num_extents = div64_u64(old_size + BTRFS_MAX_EXTENT_SIZE - 1,
+                               BTRFS_MAX_EXTENT_SIZE);
+       if (div64_u64(new_size + BTRFS_MAX_EXTENT_SIZE - 1,
+                     BTRFS_MAX_EXTENT_SIZE) > num_extents)
+               return;
+
        spin_lock(&BTRFS_I(inode)->lock);
        BTRFS_I(inode)->outstanding_extents--;
        spin_unlock(&BTRFS_I(inode)->lock);
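As a worked example of the merge accounting, again assuming an illustrative 128 MiB value for BTRFS_MAX_EXTENT_SIZE: merging a 100 MiB delalloc extent with an adjacent 20 MiB one gives 120 MiB, which still fits in a single max-sized extent, so one outstanding extent is released; merging 120 MiB with 20 MiB gives 140 MiB, which rounds up to two extents against the original one, so the function returns early and keeps the extra reservation.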
@@ -1604,7 +1663,7 @@ static void btrfs_del_delalloc_inode(struct btrfs_root *root,
  * have pending delalloc work to be done.
  */
 static void btrfs_set_bit_hook(struct inode *inode,
-                              struct extent_state *state, unsigned long *bits)
+                              struct extent_state *state, unsigned *bits)
 {
 
        if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
@@ -1645,9 +1704,11 @@ static void btrfs_set_bit_hook(struct inode *inode,
  */
 static void btrfs_clear_bit_hook(struct inode *inode,
                                 struct extent_state *state,
-                                unsigned long *bits)
+                                unsigned *bits)
 {
        u64 len = state->end + 1 - state->start;
+       u64 num_extents = div64_u64(len + BTRFS_MAX_EXTENT_SIZE - 1,
+                                   BTRFS_MAX_EXTENT_SIZE);
 
        spin_lock(&BTRFS_I(inode)->lock);
        if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
@@ -1667,7 +1728,7 @@ static void btrfs_clear_bit_hook(struct inode *inode,
                        *bits &= ~EXTENT_FIRST_DELALLOC;
                } else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
                        spin_lock(&BTRFS_I(inode)->lock);
-                       BTRFS_I(inode)->outstanding_extents--;
+                       BTRFS_I(inode)->outstanding_extents -= num_extents;
                        spin_unlock(&BTRFS_I(inode)->lock);
                }
 
@@ -2945,7 +3006,7 @@ static int __readpage_endio_check(struct inode *inode,
        return 0;
 zeroit:
        if (__ratelimit(&_rs))
-               btrfs_info(BTRFS_I(inode)->root->fs_info,
+               btrfs_warn(BTRFS_I(inode)->root->fs_info,
                           "csum failed ino %llu off %llu csum %u expected csum %u",
                           btrfs_ino(inode), start, csum, csum_expected);
        memset(kaddr + pgoff, 1, len);
@@ -3407,7 +3468,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
 
 out:
        if (ret)
-               btrfs_crit(root->fs_info,
+               btrfs_err(root->fs_info,
                        "could not do orphan cleanup %d", ret);
        btrfs_free_path(path);
        return ret;
@@ -3490,7 +3551,6 @@ static void btrfs_read_locked_inode(struct inode *inode)
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_inode_item *inode_item;
-       struct btrfs_timespec *tspec;
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct btrfs_key location;
        unsigned long ptr;
@@ -3527,17 +3587,19 @@ static void btrfs_read_locked_inode(struct inode *inode)
        i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
        btrfs_i_size_write(inode, btrfs_inode_size(leaf, inode_item));
 
-       tspec = btrfs_inode_atime(inode_item);
-       inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, tspec);
-       inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
+       inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
+       inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);
+
+       inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
+       inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);
 
-       tspec = btrfs_inode_mtime(inode_item);
-       inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, tspec);
-       inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
+       inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
+       inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);
 
-       tspec = btrfs_inode_ctime(inode_item);
-       inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, tspec);
-       inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, tspec);
+       BTRFS_I(inode)->i_otime.tv_sec =
+               btrfs_timespec_sec(leaf, &inode_item->otime);
+       BTRFS_I(inode)->i_otime.tv_nsec =
+               btrfs_timespec_nsec(leaf, &inode_item->otime);
 
        inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
        BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
@@ -3656,21 +3718,26 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
        btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
        btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
 
-       btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
+       btrfs_set_token_timespec_sec(leaf, &item->atime,
                                     inode->i_atime.tv_sec, &token);
-       btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
+       btrfs_set_token_timespec_nsec(leaf, &item->atime,
                                      inode->i_atime.tv_nsec, &token);
 
-       btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
+       btrfs_set_token_timespec_sec(leaf, &item->mtime,
                                     inode->i_mtime.tv_sec, &token);
-       btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
+       btrfs_set_token_timespec_nsec(leaf, &item->mtime,
                                      inode->i_mtime.tv_nsec, &token);
 
-       btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
+       btrfs_set_token_timespec_sec(leaf, &item->ctime,
                                     inode->i_ctime.tv_sec, &token);
-       btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
+       btrfs_set_token_timespec_nsec(leaf, &item->ctime,
                                      inode->i_ctime.tv_nsec, &token);
 
+       btrfs_set_token_timespec_sec(leaf, &item->otime,
+                                    BTRFS_I(inode)->i_otime.tv_sec, &token);
+       btrfs_set_token_timespec_nsec(leaf, &item->otime,
+                                     BTRFS_I(inode)->i_otime.tv_nsec, &token);
+
        btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
                                     &token);
        btrfs_set_token_inode_generation(leaf, item, BTRFS_I(inode)->generation,
@@ -5007,6 +5074,7 @@ static int fixup_tree_root_location(struct btrfs_root *root,
        struct btrfs_root *new_root;
        struct btrfs_root_ref *ref;
        struct extent_buffer *leaf;
+       struct btrfs_key key;
        int ret;
        int err = 0;
 
@@ -5017,9 +5085,12 @@ static int fixup_tree_root_location(struct btrfs_root *root,
        }
 
        err = -ENOENT;
-       ret = btrfs_find_item(root->fs_info->tree_root, path,
-                               BTRFS_I(dir)->root->root_key.objectid,
-                               location->objectid, BTRFS_ROOT_REF_KEY, NULL);
+       key.objectid = BTRFS_I(dir)->root->root_key.objectid;
+       key.type = BTRFS_ROOT_REF_KEY;
+       key.offset = location->objectid;
+
+       ret = btrfs_search_slot(NULL, root->fs_info->tree_root, &key, path,
+                               0, 0);
        if (ret) {
                if (ret < 0)
                        err = ret;
@@ -5258,7 +5329,10 @@ static struct inode *new_simple_dir(struct super_block *s,
        inode->i_op = &btrfs_dir_ro_inode_operations;
        inode->i_fop = &simple_dir_operations;
        inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
-       inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+       inode->i_mtime = CURRENT_TIME;
+       inode->i_atime = inode->i_mtime;
+       inode->i_ctime = inode->i_mtime;
+       BTRFS_I(inode)->i_otime = inode->i_mtime;
 
        return inode;
 }
@@ -5826,7 +5900,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
 
        inode_init_owner(inode, dir, mode);
        inode_set_bytes(inode, 0);
-       inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+
+       inode->i_mtime = CURRENT_TIME;
+       inode->i_atime = inode->i_mtime;
+       inode->i_ctime = inode->i_mtime;
+       BTRFS_I(inode)->i_otime = inode->i_mtime;
+
        inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                  struct btrfs_inode_item);
        memset_extent_buffer(path->nodes[0], 0, (unsigned long)inode_item,
@@ -7134,11 +7213,12 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
        u64 start = iblock << inode->i_blkbits;
        u64 lockstart, lockend;
        u64 len = bh_result->b_size;
+       u64 orig_len = len;
        int unlock_bits = EXTENT_LOCKED;
        int ret = 0;
 
        if (create)
-               unlock_bits |= EXTENT_DELALLOC | EXTENT_DIRTY;
+               unlock_bits |= EXTENT_DIRTY;
        else
                len = min_t(u64, len, root->sectorsize);
 
@@ -7269,14 +7349,12 @@ unlock:
                if (start + len > i_size_read(inode))
                        i_size_write(inode, start + len);
 
-               spin_lock(&BTRFS_I(inode)->lock);
-               BTRFS_I(inode)->outstanding_extents++;
-               spin_unlock(&BTRFS_I(inode)->lock);
-
-               ret = set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
-                                    lockstart + len - 1, EXTENT_DELALLOC, NULL,
-                                    &cached_state, GFP_NOFS);
-               BUG_ON(ret);
+               if (len < orig_len) {
+                       spin_lock(&BTRFS_I(inode)->lock);
+                       BTRFS_I(inode)->outstanding_extents++;
+                       spin_unlock(&BTRFS_I(inode)->lock);
+               }
+               btrfs_free_reserved_data_space(inode, len);
        }
 
        /*
@@ -7805,8 +7883,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
        }
 
        /* async crcs make it difficult to collect full stripe writes. */
-       if (btrfs_get_alloc_profile(root, 1) &
-           (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6))
+       if (btrfs_get_alloc_profile(root, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK)
                async_submit = 0;
        else
                async_submit = 1;
@@ -8053,8 +8130,6 @@ static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
                else if (ret >= 0 && (size_t)ret < count)
                        btrfs_delalloc_release_space(inode,
                                                     count - (size_t)ret);
-               else
-                       btrfs_delalloc_release_metadata(inode, 0);
        }
 out:
        if (wakeup)
@@ -8575,6 +8650,9 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
 
        ei->delayed_node = NULL;
 
+       ei->i_otime.tv_sec = 0;
+       ei->i_otime.tv_nsec = 0;
+
        inode = &ei->vfs_inode;
        extent_map_tree_init(&ei->extent_tree);
        extent_io_tree_init(&ei->io_tree, &inode->i_data);
index d49fe8a0f6b5c9ada112830f6f27a8eafe202c92..74609b931ba5564da01de0955b8aa1d3142512d7 100644 (file)
@@ -776,11 +776,11 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
            IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
                return -EPERM;
        if (isdir) {
-               if (!S_ISDIR(victim->d_inode->i_mode))
+               if (!d_is_dir(victim))
                        return -ENOTDIR;
                if (IS_ROOT(victim))
                        return -EBUSY;
-       } else if (S_ISDIR(victim->d_inode->i_mode))
+       } else if (d_is_dir(victim))
                return -EISDIR;
        if (IS_DEADDIR(dir))
                return -ENOENT;
index 48b60dbf807fd170593b2e0c7d0a3d1a36f26f58..97159a8e91d40b24ca1a8f6367892ecf2ad8b960 100644 (file)
@@ -1431,9 +1431,8 @@ static int qgroup_excl_accounting(struct btrfs_fs_info *fs_info,
                qgroup = u64_to_ptr(unode->aux);
                qgroup->rfer += sign * oper->num_bytes;
                qgroup->rfer_cmpr += sign * oper->num_bytes;
+               WARN_ON(sign < 0 && qgroup->excl < oper->num_bytes);
                qgroup->excl += sign * oper->num_bytes;
-               if (sign < 0)
-                       WARN_ON(qgroup->excl < oper->num_bytes);
                qgroup->excl_cmpr += sign * oper->num_bytes;
                qgroup_dirty(fs_info, qgroup);
 
index 8ab2a17bbba8b754bdcf90721d3ca40fc0e2f4b6..5264858ed7683f2306ccba372bf510c4509862fa 100644 (file)
  */
 #define RBIO_CACHE_READY_BIT   3
 
-/*
- * bbio and raid_map is managed by the caller, so we shouldn't free
- * them here. And besides that, all rbios with this flag should not
- * be cached, because we need raid_map to check the rbios' stripe
- * is the same or not, but it is very likely that the caller has
- * free raid_map, so don't cache those rbios.
- */
-#define RBIO_HOLD_BBIO_MAP_BIT 4
-
 #define RBIO_CACHE_SIZE 1024
 
 enum btrfs_rbio_ops {
@@ -79,13 +70,6 @@ struct btrfs_raid_bio {
        struct btrfs_fs_info *fs_info;
        struct btrfs_bio *bbio;
 
-       /*
-        * logical block numbers for the start of each stripe
-        * The last one or two are p/q.  These are sorted,
-        * so raid_map[0] is the start of our full stripe
-        */
-       u64 *raid_map;
-
        /* while we're doing rmw on a stripe
         * we put it into a hash table so we can
         * lock the stripe and merge more rbios
@@ -303,7 +287,7 @@ static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
  */
 static int rbio_bucket(struct btrfs_raid_bio *rbio)
 {
-       u64 num = rbio->raid_map[0];
+       u64 num = rbio->bbio->raid_map[0];
 
        /*
         * we shift down quite a bit.  We're using byte
@@ -606,8 +590,8 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
            test_bit(RBIO_CACHE_BIT, &cur->flags))
                return 0;
 
-       if (last->raid_map[0] !=
-           cur->raid_map[0])
+       if (last->bbio->raid_map[0] !=
+           cur->bbio->raid_map[0])
                return 0;
 
        /* we can't merge with different operations */
@@ -689,7 +673,7 @@ static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
        spin_lock_irqsave(&h->lock, flags);
        list_for_each_entry(cur, &h->hash_list, hash_list) {
                walk++;
-               if (cur->raid_map[0] == rbio->raid_map[0]) {
+               if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
                        spin_lock(&cur->bio_list_lock);
 
                        /* can we steal this cached rbio's pages? */
@@ -841,21 +825,6 @@ done_nolock:
                remove_rbio_from_cache(rbio);
 }
 
-static inline void
-__free_bbio_and_raid_map(struct btrfs_bio *bbio, u64 *raid_map, int need)
-{
-       if (need) {
-               kfree(raid_map);
-               kfree(bbio);
-       }
-}
-
-static inline void free_bbio_and_raid_map(struct btrfs_raid_bio *rbio)
-{
-       __free_bbio_and_raid_map(rbio->bbio, rbio->raid_map,
-                       !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags));
-}
-
 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
 {
        int i;
@@ -875,8 +844,7 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio)
                }
        }
 
-       free_bbio_and_raid_map(rbio);
-
+       btrfs_put_bbio(rbio->bbio);
        kfree(rbio);
 }
 
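The RBIO_HOLD_BBIO_MAP_BIT bookkeeping removed above is replaced by plain reference counting on the btrfs_bio (btrfs_get_bbio()/btrfs_put_bbio()). A minimal user-space sketch of that get/put pattern, using a hypothetical demo_bbio type and a plain int where the kernel uses an atomic counter:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct btrfs_bio; only the refcount matters. */
struct demo_bbio {
	int refs;			/* the kernel uses an atomic counter */
};

static struct demo_bbio *demo_alloc_bbio(void)
{
	struct demo_bbio *bbio = calloc(1, sizeof(*bbio));

	if (bbio)
		bbio->refs = 1;		/* caller owns the first reference */
	return bbio;
}

static void demo_get_bbio(struct demo_bbio *bbio)
{
	bbio->refs++;			/* e.g. the rbio keeps the mapping alive */
}

static void demo_put_bbio(struct demo_bbio *bbio)
{
	if (bbio && --bbio->refs == 0)	/* whoever drops the last ref frees it */
		free(bbio);
}

int main(void)
{
	struct demo_bbio *bbio = demo_alloc_bbio();

	if (!bbio)
		return 1;
	demo_get_bbio(bbio);		/* rbio takes its own reference */
	demo_put_bbio(bbio);		/* original submitter drops its ref */
	demo_put_bbio(bbio);		/* rbio teardown frees the mapping */
	printf("done\n");
	return 0;
}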
@@ -985,8 +953,7 @@ static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
  * this does not allocate any pages for rbio->pages.
  */
 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
-                         struct btrfs_bio *bbio, u64 *raid_map,
-                         u64 stripe_len)
+                         struct btrfs_bio *bbio, u64 stripe_len)
 {
        struct btrfs_raid_bio *rbio;
        int nr_data = 0;
@@ -1007,7 +974,6 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
        INIT_LIST_HEAD(&rbio->stripe_cache);
        INIT_LIST_HEAD(&rbio->hash_list);
        rbio->bbio = bbio;
-       rbio->raid_map = raid_map;
        rbio->fs_info = root->fs_info;
        rbio->stripe_len = stripe_len;
        rbio->nr_pages = num_pages;
@@ -1028,10 +994,12 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
        rbio->bio_pages = p + sizeof(struct page *) * num_pages;
        rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
 
-       if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
+       if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
+               nr_data = real_stripes - 1;
+       else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
                nr_data = real_stripes - 2;
        else
-               nr_data = real_stripes - 1;
+               BUG();
 
        rbio->nr_data = nr_data;
        return rbio;
@@ -1182,7 +1150,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
        spin_lock_irq(&rbio->bio_list_lock);
        bio_list_for_each(bio, &rbio->bio_list) {
                start = (u64)bio->bi_iter.bi_sector << 9;
-               stripe_offset = start - rbio->raid_map[0];
+               stripe_offset = start - rbio->bbio->raid_map[0];
                page_index = stripe_offset >> PAGE_CACHE_SHIFT;
 
                for (i = 0; i < bio->bi_vcnt; i++) {
@@ -1402,7 +1370,7 @@ static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
        logical <<= 9;
 
        for (i = 0; i < rbio->nr_data; i++) {
-               stripe_start = rbio->raid_map[i];
+               stripe_start = rbio->bbio->raid_map[i];
                if (logical >= stripe_start &&
                    logical < stripe_start + rbio->stripe_len) {
                        return i;
@@ -1776,17 +1744,16 @@ static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
  * our main entry point for writes from the rest of the FS.
  */
 int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
-                       struct btrfs_bio *bbio, u64 *raid_map,
-                       u64 stripe_len)
+                       struct btrfs_bio *bbio, u64 stripe_len)
 {
        struct btrfs_raid_bio *rbio;
        struct btrfs_plug_cb *plug = NULL;
        struct blk_plug_cb *cb;
        int ret;
 
-       rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
+       rbio = alloc_rbio(root, bbio, stripe_len);
        if (IS_ERR(rbio)) {
-               __free_bbio_and_raid_map(bbio, raid_map, 1);
+               btrfs_put_bbio(bbio);
                return PTR_ERR(rbio);
        }
        bio_list_add(&rbio->bio_list, bio);
@@ -1885,9 +1852,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
                }
 
                /* all raid6 handling here */
-               if (rbio->raid_map[rbio->real_stripes - 1] ==
-                   RAID6_Q_STRIPE) {
-
+               if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
                        /*
                         * single failure, rebuild from parity raid5
                         * style
@@ -1922,8 +1887,9 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
                         * here due to a crc mismatch and we can't give them the
                         * data they want
                         */
-                       if (rbio->raid_map[failb] == RAID6_Q_STRIPE) {
-                               if (rbio->raid_map[faila] == RAID5_P_STRIPE) {
+                       if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
+                               if (rbio->bbio->raid_map[faila] ==
+                                   RAID5_P_STRIPE) {
                                        err = -EIO;
                                        goto cleanup;
                                }
@@ -1934,7 +1900,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
                                goto pstripe;
                        }
 
-                       if (rbio->raid_map[failb] == RAID5_P_STRIPE) {
+                       if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
                                raid6_datap_recov(rbio->real_stripes,
                                                  PAGE_SIZE, faila, pointers);
                        } else {
@@ -2001,8 +1967,7 @@ cleanup:
 
 cleanup_io:
        if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
-               if (err == 0 &&
-                   !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags))
+               if (err == 0)
                        cache_rbio_pages(rbio);
                else
                        clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
@@ -2156,15 +2121,16 @@ cleanup:
  * of the drive.
  */
 int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
-                         struct btrfs_bio *bbio, u64 *raid_map,
-                         u64 stripe_len, int mirror_num, int generic_io)
+                         struct btrfs_bio *bbio, u64 stripe_len,
+                         int mirror_num, int generic_io)
 {
        struct btrfs_raid_bio *rbio;
        int ret;
 
-       rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
+       rbio = alloc_rbio(root, bbio, stripe_len);
        if (IS_ERR(rbio)) {
-               __free_bbio_and_raid_map(bbio, raid_map, generic_io);
+               if (generic_io)
+                       btrfs_put_bbio(bbio);
                return PTR_ERR(rbio);
        }
 
@@ -2175,7 +2141,8 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
        rbio->faila = find_logical_bio_stripe(rbio, bio);
        if (rbio->faila == -1) {
                BUG();
-               __free_bbio_and_raid_map(bbio, raid_map, generic_io);
+               if (generic_io)
+                       btrfs_put_bbio(bbio);
                kfree(rbio);
                return -EIO;
        }
@@ -2184,7 +2151,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
                btrfs_bio_counter_inc_noblocked(root->fs_info);
                rbio->generic_bio_cnt = 1;
        } else {
-               set_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags);
+               btrfs_get_bbio(bbio);
        }
 
        /*
@@ -2240,14 +2207,14 @@ static void read_rebuild_work(struct btrfs_work *work)
 
 struct btrfs_raid_bio *
 raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
-                              struct btrfs_bio *bbio, u64 *raid_map,
-                              u64 stripe_len, struct btrfs_device *scrub_dev,
+                              struct btrfs_bio *bbio, u64 stripe_len,
+                              struct btrfs_device *scrub_dev,
                               unsigned long *dbitmap, int stripe_nsectors)
 {
        struct btrfs_raid_bio *rbio;
        int i;
 
-       rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
+       rbio = alloc_rbio(root, bbio, stripe_len);
        if (IS_ERR(rbio))
                return NULL;
        bio_list_add(&rbio->bio_list, bio);
@@ -2279,10 +2246,10 @@ void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
        int stripe_offset;
        int index;
 
-       ASSERT(logical >= rbio->raid_map[0]);
-       ASSERT(logical + PAGE_SIZE <= rbio->raid_map[0] +
+       ASSERT(logical >= rbio->bbio->raid_map[0]);
+       ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
                                rbio->stripe_len * rbio->nr_data);
-       stripe_offset = (int)(logical - rbio->raid_map[0]);
+       stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
        index = stripe_offset >> PAGE_CACHE_SHIFT;
        rbio->bio_pages[index] = page;
 }
index 31d4a157b5e3a153fb283a12e8852a962a63f1f2..2b5d7977d83b2248e40d7331e4fd7d6d831ef1d6 100644 (file)
@@ -43,16 +43,15 @@ struct btrfs_raid_bio;
 struct btrfs_device;
 
 int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
-                         struct btrfs_bio *bbio, u64 *raid_map,
-                         u64 stripe_len, int mirror_num, int generic_io);
+                         struct btrfs_bio *bbio, u64 stripe_len,
+                         int mirror_num, int generic_io);
 int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
-                              struct btrfs_bio *bbio, u64 *raid_map,
-                              u64 stripe_len);
+                              struct btrfs_bio *bbio, u64 stripe_len);
 
 struct btrfs_raid_bio *
 raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
-                              struct btrfs_bio *bbio, u64 *raid_map,
-                              u64 stripe_len, struct btrfs_device *scrub_dev,
+                              struct btrfs_bio *bbio, u64 stripe_len,
+                              struct btrfs_device *scrub_dev,
                               unsigned long *dbitmap, int stripe_nsectors);
 void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
                                   struct page *page, u64 logical);
index b63ae20618fb3f573d7917b088fa58a11f887293..0e7beea92b4cc1279def4a3c61a440ec177ef1a0 100644 (file)
@@ -66,7 +66,6 @@ struct reada_extctl {
 struct reada_extent {
        u64                     logical;
        struct btrfs_key        top;
-       u32                     blocksize;
        int                     err;
        struct list_head        extctl;
        int                     refcnt;
@@ -349,7 +348,6 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
 
        blocksize = root->nodesize;
        re->logical = logical;
-       re->blocksize = blocksize;
        re->top = *top;
        INIT_LIST_HEAD(&re->extctl);
        spin_lock_init(&re->lock);
@@ -463,7 +461,7 @@ static struct reada_extent *reada_find_extent(struct btrfs_root *root,
        spin_unlock(&fs_info->reada_lock);
        btrfs_dev_replace_unlock(&fs_info->dev_replace);
 
-       kfree(bbio);
+       btrfs_put_bbio(bbio);
        return re;
 
 error:
@@ -488,7 +486,7 @@ error:
                kref_put(&zone->refcnt, reada_zone_release);
                spin_unlock(&fs_info->reada_lock);
        }
-       kfree(bbio);
+       btrfs_put_bbio(bbio);
        kfree(re);
        return re_exist;
 }
@@ -660,7 +658,6 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
        int mirror_num = 0;
        struct extent_buffer *eb = NULL;
        u64 logical;
-       u32 blocksize;
        int ret;
        int i;
        int need_kick = 0;
@@ -694,7 +691,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
                spin_unlock(&fs_info->reada_lock);
                return 0;
        }
-       dev->reada_next = re->logical + re->blocksize;
+       dev->reada_next = re->logical + fs_info->tree_root->nodesize;
        re->refcnt++;
 
        spin_unlock(&fs_info->reada_lock);
@@ -709,7 +706,6 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
                }
        }
        logical = re->logical;
-       blocksize = re->blocksize;
 
        spin_lock(&re->lock);
        if (re->scheduled_for == NULL) {
@@ -724,8 +720,8 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
                return 0;
 
        atomic_inc(&dev->reada_in_flight);
-       ret = reada_tree_block_flagged(fs_info->extent_root, logical, blocksize,
-                        mirror_num, &eb);
+       ret = reada_tree_block_flagged(fs_info->extent_root, logical,
+                       mirror_num, &eb);
        if (ret)
                __readahead_hook(fs_info->extent_root, NULL, logical, ret);
        else if (eb)
@@ -851,7 +847,7 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
                                break;
                        printk(KERN_DEBUG
                                "  re: logical %llu size %u empty %d for %lld",
-                               re->logical, re->blocksize,
+                               re->logical, fs_info->tree_root->nodesize,
                                list_empty(&re->extctl), re->scheduled_for ?
                                re->scheduled_for->devid : -1);
 
@@ -886,7 +882,8 @@ static void dump_devs(struct btrfs_fs_info *fs_info, int all)
                }
                printk(KERN_DEBUG
                        "re: logical %llu size %u list empty %d for %lld",
-                       re->logical, re->blocksize, list_empty(&re->extctl),
+                       re->logical, fs_info->tree_root->nodesize,
+                       list_empty(&re->extctl),
                        re->scheduled_for ? re->scheduled_for->devid : -1);
                for (i = 0; i < re->nzones; ++i) {
                        printk(KERN_CONT " zone %llu-%llu devs",
index 74257d6436adda1b772d8658be41202093ef577a..d83085381bccfa745ee0258b8cab5558afbcd639 100644 (file)
@@ -2855,9 +2855,10 @@ static void update_processed_blocks(struct reloc_control *rc,
        }
 }
 
-static int tree_block_processed(u64 bytenr, u32 blocksize,
-                               struct reloc_control *rc)
+static int tree_block_processed(u64 bytenr, struct reloc_control *rc)
 {
+       u32 blocksize = rc->extent_root->nodesize;
+
        if (test_range_bit(&rc->processed_blocks, bytenr,
                           bytenr + blocksize - 1, EXTENT_DIRTY, 1, NULL))
                return 1;
@@ -2965,8 +2966,7 @@ int relocate_tree_blocks(struct btrfs_trans_handle *trans,
        while (rb_node) {
                block = rb_entry(rb_node, struct tree_block, rb_node);
                if (!block->key_ready)
-                       readahead_tree_block(rc->extent_root, block->bytenr,
-                                       block->key.objectid);
+                       readahead_tree_block(rc->extent_root, block->bytenr);
                rb_node = rb_next(rb_node);
        }
 
@@ -3353,7 +3353,7 @@ static int __add_tree_block(struct reloc_control *rc,
        bool skinny = btrfs_fs_incompat(rc->extent_root->fs_info,
                                        SKINNY_METADATA);
 
-       if (tree_block_processed(bytenr, blocksize, rc))
+       if (tree_block_processed(bytenr, rc))
                return 0;
 
        if (tree_search(blocks, bytenr))
@@ -3611,7 +3611,7 @@ static int find_data_references(struct reloc_control *rc,
                if (added)
                        goto next;
 
-               if (!tree_block_processed(leaf->start, leaf->len, rc)) {
+               if (!tree_block_processed(leaf->start, rc)) {
                        block = kmalloc(sizeof(*block), GFP_NOFS);
                        if (!block) {
                                err = -ENOMEM;
index e427cb7ee12c7d848cd16402d78854e22db969dd..ec57687c9a4d8a079466d6e9f95724188ec03bc7 100644 (file)
@@ -66,7 +66,6 @@ struct scrub_ctx;
 struct scrub_recover {
        atomic_t                refs;
        struct btrfs_bio        *bbio;
-       u64                     *raid_map;
        u64                     map_length;
 };
 
@@ -80,7 +79,7 @@ struct scrub_page {
        u64                     logical;
        u64                     physical;
        u64                     physical_for_dev_replace;
-       atomic_t                ref_count;
+       atomic_t                refs;
        struct {
                unsigned int    mirror_num:8;
                unsigned int    have_csum:1;
@@ -113,7 +112,7 @@ struct scrub_block {
        struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
        int                     page_count;
        atomic_t                outstanding_pages;
-       atomic_t                ref_count; /* free mem on transition to zero */
+       atomic_t                refs; /* free mem on transition to zero */
        struct scrub_ctx        *sctx;
        struct scrub_parity     *sparity;
        struct {
@@ -142,7 +141,7 @@ struct scrub_parity {
 
        int                     stripe_len;
 
-       atomic_t                ref_count;
+       atomic_t                refs;
 
        struct list_head        spages;
 
@@ -194,6 +193,15 @@ struct scrub_ctx {
         */
        struct btrfs_scrub_progress stat;
        spinlock_t              stat_lock;
+
+       /*
+        * Use a ref counter to avoid use-after-free issues. Scrub workers
+        * decrement bios_in_flight and workers_pending and then do a wakeup
+        * on the list_wait wait queue. We must ensure the main scrub task
+        * doesn't free the scrub context before or while the workers are
+        * doing the wakeup() call.
+        */
+       atomic_t                refs;
 };
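Concretely, with the counter added above: the context starts at refs == 1, each in-flight bio or pending worker takes an extra reference, and the completion path decrements bios_in_flight, performs the wake_up() on list_wait, and only then drops its reference. Whichever side drops the last reference frees the context, so the main scrub task can no longer free it while a worker is still inside the wakeup call.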
 
 struct scrub_fixup_nodatasum {
@@ -236,10 +244,7 @@ static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
-static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
-                                    struct btrfs_fs_info *fs_info,
-                                    struct scrub_block *original_sblock,
-                                    u64 length, u64 logical,
+static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
                                     struct scrub_block *sblocks_for_recheck);
 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
                                struct scrub_block *sblock, int is_metadata,
@@ -251,8 +256,7 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
                                         const u8 *csum, u64 generation,
                                         u16 csum_size);
 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
-                                            struct scrub_block *sblock_good,
-                                            int force_write);
+                                            struct scrub_block *sblock_good);
 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
                                            struct scrub_block *sblock_good,
                                            int page_num, int force_write);
@@ -302,10 +306,12 @@ static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 static void copy_nocow_pages_worker(struct btrfs_work *work);
 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
+static void scrub_put_ctx(struct scrub_ctx *sctx);
 
 
 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
 {
+       atomic_inc(&sctx->refs);
        atomic_inc(&sctx->bios_in_flight);
 }
 
@@ -313,6 +319,7 @@ static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
 {
        atomic_dec(&sctx->bios_in_flight);
        wake_up(&sctx->list_wait);
+       scrub_put_ctx(sctx);
 }
 
 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
@@ -346,6 +353,7 @@ static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
 {
        struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
 
+       atomic_inc(&sctx->refs);
        /*
         * increment scrubs_running to prevent cancel requests from
         * completing as long as a worker is running. we must also
@@ -388,6 +396,7 @@ static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
        atomic_dec(&sctx->workers_pending);
        wake_up(&fs_info->scrub_pause_wait);
        wake_up(&sctx->list_wait);
+       scrub_put_ctx(sctx);
 }
 
 static void scrub_free_csums(struct scrub_ctx *sctx)
@@ -433,6 +442,12 @@ static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
        kfree(sctx);
 }
 
+static void scrub_put_ctx(struct scrub_ctx *sctx)
+{
+       if (atomic_dec_and_test(&sctx->refs))
+               scrub_free_ctx(sctx);
+}
+
 static noinline_for_stack
 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
 {
@@ -457,6 +472,7 @@ struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
        sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
        if (!sctx)
                goto nomem;
+       atomic_set(&sctx->refs, 1);
        sctx->is_dev_replace = is_dev_replace;
        sctx->pages_per_rd_bio = pages_per_rd_bio;
        sctx->curr = -1;
@@ -520,6 +536,7 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
        struct inode_fs_paths *ipath = NULL;
        struct btrfs_root *local_root;
        struct btrfs_key root_key;
+       struct btrfs_key key;
 
        root_key.objectid = root;
        root_key.type = BTRFS_ROOT_ITEM_KEY;
@@ -530,7 +547,14 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
                goto err;
        }
 
-       ret = inode_item_info(inum, 0, local_root, swarn->path);
+       /*
+        * this makes the path point to (inum INODE_ITEM ioff)
+        */
+       key.objectid = inum;
+       key.type = BTRFS_INODE_ITEM_KEY;
+       key.offset = 0;
+
+       ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
        if (ret) {
                btrfs_release_path(swarn->path);
                goto err;
@@ -848,8 +872,7 @@ static inline void scrub_get_recover(struct scrub_recover *recover)
 static inline void scrub_put_recover(struct scrub_recover *recover)
 {
        if (atomic_dec_and_test(&recover->refs)) {
-               kfree(recover->bbio);
-               kfree(recover->raid_map);
+               btrfs_put_bbio(recover->bbio);
                kfree(recover);
        }
 }
@@ -955,8 +978,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
        }
 
        /* setup the context, map the logical blocks and alloc the pages */
-       ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
-                                       logical, sblocks_for_recheck);
+       ret = scrub_setup_recheck_block(sblock_to_check, sblocks_for_recheck);
        if (ret) {
                spin_lock(&sctx->stat_lock);
                sctx->stat.read_errors++;
@@ -1030,9 +1052,10 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
        if (!is_metadata && !have_csum) {
                struct scrub_fixup_nodatasum *fixup_nodatasum;
 
-nodatasum_case:
                WARN_ON(sctx->is_dev_replace);
 
+nodatasum_case:
+
                /*
                 * !is_metadata and !have_csum, this means that the data
                 * might not be COW'ed, that it might be modified
@@ -1091,76 +1114,20 @@ nodatasum_case:
                    sblock_other->no_io_error_seen) {
                        if (sctx->is_dev_replace) {
                                scrub_write_block_to_dev_replace(sblock_other);
+                               goto corrected_error;
                        } else {
-                               int force_write = is_metadata || have_csum;
-
                                ret = scrub_repair_block_from_good_copy(
-                                               sblock_bad, sblock_other,
-                                               force_write);
+                                               sblock_bad, sblock_other);
+                               if (!ret)
+                                       goto corrected_error;
                        }
-                       if (0 == ret)
-                               goto corrected_error;
                }
        }
 
-       /*
-        * for dev_replace, pick good pages and write to the target device.
-        */
-       if (sctx->is_dev_replace) {
-               success = 1;
-               for (page_num = 0; page_num < sblock_bad->page_count;
-                    page_num++) {
-                       int sub_success;
-
-                       sub_success = 0;
-                       for (mirror_index = 0;
-                            mirror_index < BTRFS_MAX_MIRRORS &&
-                            sblocks_for_recheck[mirror_index].page_count > 0;
-                            mirror_index++) {
-                               struct scrub_block *sblock_other =
-                                       sblocks_for_recheck + mirror_index;
-                               struct scrub_page *page_other =
-                                       sblock_other->pagev[page_num];
-
-                               if (!page_other->io_error) {
-                                       ret = scrub_write_page_to_dev_replace(
-                                                       sblock_other, page_num);
-                                       if (ret == 0) {
-                                               /* succeeded for this page */
-                                               sub_success = 1;
-                                               break;
-                                       } else {
-                                               btrfs_dev_replace_stats_inc(
-                                                       &sctx->dev_root->
-                                                       fs_info->dev_replace.
-                                                       num_write_errors);
-                                       }
-                               }
-                       }
-
-                       if (!sub_success) {
-                               /*
-                                * did not find a mirror to fetch the page
-                                * from. scrub_write_page_to_dev_replace()
-                                * handles this case (page->io_error), by
-                                * filling the block with zeros before
-                                * submitting the write request
-                                */
-                               success = 0;
-                               ret = scrub_write_page_to_dev_replace(
-                                               sblock_bad, page_num);
-                               if (ret)
-                                       btrfs_dev_replace_stats_inc(
-                                               &sctx->dev_root->fs_info->
-                                               dev_replace.num_write_errors);
-                       }
-               }
-
-               goto out;
-       }
+       if (sblock_bad->no_io_error_seen && !sctx->is_dev_replace)
+               goto did_not_correct_error;
 
        /*
-        * for regular scrub, repair those pages that are errored.
         * In case of I/O errors in the area that is supposed to be
         * repaired, continue by picking good copies of those pages.
         * Select the good pages from mirrors to rewrite bad pages from
@@ -1184,44 +1151,64 @@ nodatasum_case:
         * mirror, even if other 512 byte sectors in the same PAGE_SIZE
         * area are unreadable.
         */
-
-       /* can only fix I/O errors from here on */
-       if (sblock_bad->no_io_error_seen)
-               goto did_not_correct_error;
-
        success = 1;
-       for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
+       for (page_num = 0; page_num < sblock_bad->page_count;
+            page_num++) {
                struct scrub_page *page_bad = sblock_bad->pagev[page_num];
+               struct scrub_block *sblock_other = NULL;
 
-               if (!page_bad->io_error)
+               /* skip no-io-error page in scrub */
+               if (!page_bad->io_error && !sctx->is_dev_replace)
                        continue;
 
-               for (mirror_index = 0;
-                    mirror_index < BTRFS_MAX_MIRRORS &&
-                    sblocks_for_recheck[mirror_index].page_count > 0;
-                    mirror_index++) {
-                       struct scrub_block *sblock_other = sblocks_for_recheck +
-                                                          mirror_index;
-                       struct scrub_page *page_other = sblock_other->pagev[
-                                                       page_num];
-
-                       if (!page_other->io_error) {
-                               ret = scrub_repair_page_from_good_copy(
-                                       sblock_bad, sblock_other, page_num, 0);
-                               if (0 == ret) {
-                                       page_bad->io_error = 0;
-                                       break; /* succeeded for this page */
+               /* try to find no-io-error page in mirrors */
+               if (page_bad->io_error) {
+                       for (mirror_index = 0;
+                            mirror_index < BTRFS_MAX_MIRRORS &&
+                            sblocks_for_recheck[mirror_index].page_count > 0;
+                            mirror_index++) {
+                               if (!sblocks_for_recheck[mirror_index].
+                                   pagev[page_num]->io_error) {
+                                       sblock_other = sblocks_for_recheck +
+                                                      mirror_index;
+                                       break;
                                }
                        }
+                       if (!sblock_other)
+                               success = 0;
                }
 
-               if (page_bad->io_error) {
-                       /* did not find a mirror to copy the page from */
-                       success = 0;
+               if (sctx->is_dev_replace) {
+                       /*
+                        * did not find a mirror to fetch the page
+                        * from. scrub_write_page_to_dev_replace()
+                        * handles this case (page->io_error), by
+                        * filling the block with zeros before
+                        * submitting the write request
+                        */
+                       if (!sblock_other)
+                               sblock_other = sblock_bad;
+
+                       if (scrub_write_page_to_dev_replace(sblock_other,
+                                                           page_num) != 0) {
+                               btrfs_dev_replace_stats_inc(
+                                       &sctx->dev_root->
+                                       fs_info->dev_replace.
+                                       num_write_errors);
+                               success = 0;
+                       }
+               } else if (sblock_other) {
+                       ret = scrub_repair_page_from_good_copy(sblock_bad,
+                                                              sblock_other,
+                                                              page_num, 0);
+                       if (0 == ret)
+                               page_bad->io_error = 0;
+                       else
+                               success = 0;
                }
        }
 
-       if (success) {
+       if (success && !sctx->is_dev_replace) {
                if (is_metadata || have_csum) {
                        /*
                         * need to verify the checksum now that all
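
Note: the repair loop above now works page by page — for each bad sector it picks the first mirror whose copy read without an I/O error, and in dev-replace mode it always writes something to the target device, falling back to the bad page itself (which scrub_write_page_to_dev_replace() then zero-fills) when no intact mirror exists. A minimal stand-alone sketch of that selection policy, using simplified stand-ins for the kernel's scrub structures:

#include <stddef.h>

/* Illustrative stand-ins for the kernel's scrub_block / scrub_page. */
struct demo_page  { int io_error; };
struct demo_block { int page_count; struct demo_page pages[16]; };

/*
 * Return the first mirror whose copy of page 'page_num' read cleanly, or
 * NULL when every mirror failed; the dev-replace path then falls back to
 * the bad block itself and relies on the writer to zero-fill the page.
 */
static struct demo_block *pick_good_mirror(struct demo_block *mirrors,
                                           int nmirrors, int page_num)
{
        int i;

        for (i = 0; i < nmirrors && mirrors[i].page_count > 0; i++)
                if (!mirrors[i].pages[page_num].io_error)
                        return &mirrors[i];
        return NULL;
}
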
@@ -1288,19 +1275,18 @@ out:
        return 0;
 }
 
-static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map)
+static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
 {
-       if (raid_map) {
-               if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
-                       return 3;
-               else
-                       return 2;
-       } else {
+       if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
+               return 2;
+       else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
+               return 3;
+       else
                return (int)bbio->num_stripes;
-       }
 }
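
Note: for RAID5/6 these are not physical copies — as the context later in this diff puts it, mirror 1 is the original data block and mirror 2 the rebuild via the RAID5 parity; by extension mirror 3 is the Q-based rebuild that only RAID6 offers. A compact sketch of the same mapping, with placeholder flag bits rather than the real block-group constants:

#include <stdio.h>

#define DEMO_BG_RAID5    (1u << 0)      /* placeholder flag bits */
#define DEMO_BG_RAID6    (1u << 1)
#define DEMO_MAX_MIRRORS 3

static int demo_nr_mirrors(unsigned map_type, int num_stripes)
{
        if (map_type & DEMO_BG_RAID5)
                return 2;               /* direct read, or rebuild via P */
        if (map_type & DEMO_BG_RAID6)
                return 3;               /* direct read, P rebuild, Q rebuild */
        return num_stripes;             /* plain mirroring: one per stripe */
}

int main(void)
{
        int n = demo_nr_mirrors(DEMO_BG_RAID6, 5);

        if (n > DEMO_MAX_MIRRORS)       /* same clamp the recheck loop applies */
                n = DEMO_MAX_MIRRORS;
        printf("%d\n", n);              /* prints 3 */
        return 0;
}
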
 
-static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
+static inline void scrub_stripe_index_and_offset(u64 logical, u64 map_type,
+                                                u64 *raid_map,
                                                 u64 mapped_length,
                                                 int nstripes, int mirror,
                                                 int *stripe_index,
@@ -1308,7 +1294,7 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
 {
        int i;
 
-       if (raid_map) {
+       if (map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                /* RAID5/6 */
                for (i = 0; i < nstripes; i++) {
                        if (raid_map[i] == RAID6_Q_STRIPE ||
@@ -1329,72 +1315,65 @@ static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
        }
 }
 
-static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
-                                    struct btrfs_fs_info *fs_info,
-                                    struct scrub_block *original_sblock,
-                                    u64 length, u64 logical,
+static int scrub_setup_recheck_block(struct scrub_block *original_sblock,
                                     struct scrub_block *sblocks_for_recheck)
 {
+       struct scrub_ctx *sctx = original_sblock->sctx;
+       struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
+       u64 length = original_sblock->page_count * PAGE_SIZE;
+       u64 logical = original_sblock->pagev[0]->logical;
        struct scrub_recover *recover;
        struct btrfs_bio *bbio;
-       u64 *raid_map;
        u64 sublen;
        u64 mapped_length;
        u64 stripe_offset;
        int stripe_index;
-       int page_index;
+       int page_index = 0;
        int mirror_index;
        int nmirrors;
        int ret;
 
        /*
-        * note: the two members ref_count and outstanding_pages
+        * note: the two members refs and outstanding_pages
         * are not used (and not set) in the blocks that are used for
         * the recheck procedure
         */
 
-       page_index = 0;
        while (length > 0) {
                sublen = min_t(u64, length, PAGE_SIZE);
                mapped_length = sublen;
                bbio = NULL;
-               raid_map = NULL;
 
                /*
                 * with a length of PAGE_SIZE, each returned stripe
                 * represents one mirror
                 */
                ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
-                                      &mapped_length, &bbio, 0, &raid_map);
+                                      &mapped_length, &bbio, 0, 1);
                if (ret || !bbio || mapped_length < sublen) {
-                       kfree(bbio);
-                       kfree(raid_map);
+                       btrfs_put_bbio(bbio);
                        return -EIO;
                }
 
                recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
                if (!recover) {
-                       kfree(bbio);
-                       kfree(raid_map);
+                       btrfs_put_bbio(bbio);
                        return -ENOMEM;
                }
 
                atomic_set(&recover->refs, 1);
                recover->bbio = bbio;
-               recover->raid_map = raid_map;
                recover->map_length = mapped_length;
 
                BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
 
-               nmirrors = scrub_nr_raid_mirrors(bbio, raid_map);
+               nmirrors = min(scrub_nr_raid_mirrors(bbio), BTRFS_MAX_MIRRORS);
+
                for (mirror_index = 0; mirror_index < nmirrors;
                     mirror_index++) {
                        struct scrub_block *sblock;
                        struct scrub_page *page;
 
-                       if (mirror_index >= BTRFS_MAX_MIRRORS)
-                               continue;
-
                        sblock = sblocks_for_recheck + mirror_index;
                        sblock->sctx = sctx;
                        page = kzalloc(sizeof(*page), GFP_NOFS);
@@ -1410,9 +1389,12 @@ leave_nomem:
                        sblock->pagev[page_index] = page;
                        page->logical = logical;
 
-                       scrub_stripe_index_and_offset(logical, raid_map,
+                       scrub_stripe_index_and_offset(logical,
+                                                     bbio->map_type,
+                                                     bbio->raid_map,
                                                      mapped_length,
-                                                     bbio->num_stripes,
+                                                     bbio->num_stripes -
+                                                     bbio->num_tgtdevs,
                                                      mirror_index,
                                                      &stripe_index,
                                                      &stripe_offset);
@@ -1458,7 +1440,8 @@ static void scrub_bio_wait_endio(struct bio *bio, int error)
 
 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
 {
-       return page->recover && page->recover->raid_map;
+       return page->recover &&
+              (page->recover->bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK);
 }
 
 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
@@ -1475,7 +1458,6 @@ static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
        bio->bi_end_io = scrub_bio_wait_endio;
 
        ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
-                                   page->recover->raid_map,
                                    page->recover->map_length,
                                    page->mirror_num, 0);
        if (ret)
@@ -1615,8 +1597,7 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
 }
 
 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
-                                            struct scrub_block *sblock_good,
-                                            int force_write)
+                                            struct scrub_block *sblock_good)
 {
        int page_num;
        int ret = 0;
@@ -1626,8 +1607,7 @@ static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
 
                ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
                                                           sblock_good,
-                                                          page_num,
-                                                          force_write);
+                                                          page_num, 1);
                if (ret_sub)
                        ret = ret_sub;
        }
@@ -2067,12 +2047,12 @@ static int scrub_checksum_super(struct scrub_block *sblock)
 
 static void scrub_block_get(struct scrub_block *sblock)
 {
-       atomic_inc(&sblock->ref_count);
+       atomic_inc(&sblock->refs);
 }
 
 static void scrub_block_put(struct scrub_block *sblock)
 {
-       if (atomic_dec_and_test(&sblock->ref_count)) {
+       if (atomic_dec_and_test(&sblock->refs)) {
                int i;
 
                if (sblock->sparity)
@@ -2086,12 +2066,12 @@ static void scrub_block_put(struct scrub_block *sblock)
 
 static void scrub_page_get(struct scrub_page *spage)
 {
-       atomic_inc(&spage->ref_count);
+       atomic_inc(&spage->refs);
 }
 
 static void scrub_page_put(struct scrub_page *spage)
 {
-       if (atomic_dec_and_test(&spage->ref_count)) {
+       if (atomic_dec_and_test(&spage->refs)) {
                if (spage->page)
                        __free_page(spage->page);
                kfree(spage);
@@ -2217,7 +2197,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
 
        /* one ref inside this function, plus one for each page added to
         * a bio later on */
-       atomic_set(&sblock->ref_count, 1);
+       atomic_set(&sblock->refs, 1);
        sblock->sctx = sctx;
        sblock->no_io_error_seen = 1;
 
@@ -2510,7 +2490,7 @@ static int scrub_pages_for_parity(struct scrub_parity *sparity,
 
        /* one ref inside this function, plus one for each page added to
         * a bio later on */
-       atomic_set(&sblock->ref_count, 1);
+       atomic_set(&sblock->refs, 1);
        sblock->sctx = sctx;
        sblock->no_io_error_seen = 1;
        sblock->sparity = sparity;
@@ -2705,7 +2685,6 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
        struct btrfs_raid_bio *rbio;
        struct scrub_page *spage;
        struct btrfs_bio *bbio = NULL;
-       u64 *raid_map = NULL;
        u64 length;
        int ret;
 
@@ -2716,8 +2695,8 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
        length = sparity->logic_end - sparity->logic_start + 1;
        ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
                               sparity->logic_start,
-                              &length, &bbio, 0, &raid_map);
-       if (ret || !bbio || !raid_map)
+                              &length, &bbio, 0, 1);
+       if (ret || !bbio || !bbio->raid_map)
                goto bbio_out;
 
        bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
@@ -2729,8 +2708,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
        bio->bi_end_io = scrub_parity_bio_endio;
 
        rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
-                                             raid_map, length,
-                                             sparity->scrub_dev,
+                                             length, sparity->scrub_dev,
                                              sparity->dbitmap,
                                              sparity->nsectors);
        if (!rbio)
@@ -2747,8 +2725,7 @@ static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
 rbio_out:
        bio_put(bio);
 bbio_out:
-       kfree(bbio);
-       kfree(raid_map);
+       btrfs_put_bbio(bbio);
        bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
                  sparity->nsectors);
        spin_lock(&sctx->stat_lock);
@@ -2765,12 +2742,12 @@ static inline int scrub_calc_parity_bitmap_len(int nsectors)
 
 static void scrub_parity_get(struct scrub_parity *sparity)
 {
-       atomic_inc(&sparity->ref_count);
+       atomic_inc(&sparity->refs);
 }
 
 static void scrub_parity_put(struct scrub_parity *sparity)
 {
-       if (!atomic_dec_and_test(&sparity->ref_count))
+       if (!atomic_dec_and_test(&sparity->refs))
                return;
 
        scrub_parity_check_and_repair(sparity);
@@ -2820,7 +2797,7 @@ static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
        sparity->scrub_dev = sdev;
        sparity->logic_start = logic_start;
        sparity->logic_end = logic_end;
-       atomic_set(&sparity->ref_count, 1);
+       atomic_set(&sparity->refs, 1);
        INIT_LIST_HEAD(&sparity->spages);
        sparity->dbitmap = sparity->bitmap;
        sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
@@ -3037,8 +3014,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
                increment = map->stripe_len;
                mirror_num = num % map->num_stripes + 1;
-       } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                               BTRFS_BLOCK_GROUP_RAID6)) {
+       } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                get_raid56_logic_offset(physical, num, map, &offset, NULL);
                increment = map->stripe_len * nr_data_stripes(map);
                mirror_num = 1;
@@ -3074,8 +3050,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
         */
        logical = base + offset;
        physical_end = physical + nstripes * map->stripe_len;
-       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                        BTRFS_BLOCK_GROUP_RAID6)) {
+       if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                get_raid56_logic_offset(physical_end, num,
                                        map, &logic_end, NULL);
                logic_end += base;
@@ -3121,8 +3096,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
        ret = 0;
        while (physical < physical_end) {
                /* for raid56, we skip parity stripe */
-               if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                               BTRFS_BLOCK_GROUP_RAID6)) {
+               if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                        ret = get_raid56_logic_offset(physical, num,
                                        map, &logical, &stripe_logical);
                        logical += base;
@@ -3280,8 +3254,7 @@ again:
                        scrub_free_csums(sctx);
                        if (extent_logical + extent_len <
                            key.objectid + bytes) {
-                               if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                                       BTRFS_BLOCK_GROUP_RAID6)) {
+                               if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                                        /*
                                         * loop until we find next data stripe
                                         * or we have finished all stripes.
@@ -3775,7 +3748,7 @@ int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
        scrub_workers_put(fs_info);
        mutex_unlock(&fs_info->scrub_lock);
 
-       scrub_free_ctx(sctx);
+       scrub_put_ctx(sctx);
 
        return ret;
 }
@@ -3881,14 +3854,14 @@ static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
                              &mapped_length, &bbio, 0);
        if (ret || !bbio || mapped_length < extent_len ||
            !bbio->stripes[0].dev->bdev) {
-               kfree(bbio);
+               btrfs_put_bbio(bbio);
                return;
        }
 
        *extent_physical = bbio->stripes[0].physical;
        *extent_mirror_num = bbio->mirror_num;
        *extent_dev = bbio->stripes[0].dev;
-       kfree(bbio);
+       btrfs_put_bbio(bbio);
 }
 
 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
index 804432dbc351d8b73064724cbfbc08237d522298..fe5857223515d14753c1f75ce60c2c12e16a3e67 100644 (file)
@@ -2471,12 +2471,9 @@ verbose_printk("btrfs: send_utimes %llu\n", ino);
        if (ret < 0)
                goto out;
        TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH, p);
-       TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb,
-                       btrfs_inode_atime(ii));
-       TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb,
-                       btrfs_inode_mtime(ii));
-       TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb,
-                       btrfs_inode_ctime(ii));
+       TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_ATIME, eb, &ii->atime);
+       TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_MTIME, eb, &ii->mtime);
+       TLV_PUT_BTRFS_TIMESPEC(sctx, BTRFS_SEND_A_CTIME, eb, &ii->ctime);
        /* TODO Add otime support when the otime patches get into upstream */
 
        ret = send_cmd(sctx);
index 6f49b2872a6454330bac0ef912be3d0152e2ef4f..05fef198ff94fc8df4eb126a62195af8452fb18d 100644 (file)
@@ -1958,11 +1958,6 @@ static int btrfs_freeze(struct super_block *sb)
        return btrfs_commit_transaction(trans, root);
 }
 
-static int btrfs_unfreeze(struct super_block *sb)
-{
-       return 0;
-}
-
 static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
 {
        struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
@@ -2011,7 +2006,6 @@ static const struct super_operations btrfs_super_ops = {
        .statfs         = btrfs_statfs,
        .remount_fs     = btrfs_remount,
        .freeze_fs      = btrfs_freeze,
-       .unfreeze_fs    = btrfs_unfreeze,
 };
 
 static const struct file_operations btrfs_ctl_fops = {
index 92db3f648df40cc5d1fe3428a89348efffe45776..94edb0a2a026652b6cb017f45cc5e1ef9d521277 100644 (file)
@@ -733,10 +733,18 @@ int btrfs_init_sysfs(void)
 
        ret = btrfs_init_debugfs();
        if (ret)
-               return ret;
+               goto out1;
 
        init_feature_attrs();
        ret = sysfs_create_group(&btrfs_kset->kobj, &btrfs_feature_attr_group);
+       if (ret)
+               goto out2;
+
+       return 0;
+out2:
+       debugfs_remove_recursive(btrfs_debugfs_root_dentry);
+out1:
+       kset_unregister(btrfs_kset);
 
        return ret;
 }
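
Note: the sysfs hunk above adopts the usual staged-init/unwind shape — a later failure jumps to a label that tears down everything set up before it (debugfs first, then the kset). A generic sketch of that shape, with hypothetical setup/teardown helpers standing in for the kset, debugfs and attribute-group steps:

#include <stdio.h>

/* Hypothetical stand-ins for the kset / debugfs / sysfs-group steps. */
static int  setup_a(void)    { return 0; }
static int  setup_b(void)    { return 0; }
static int  setup_c(void)    { return -1; }   /* pretend the last step fails */
static void teardown_a(void) { puts("undo a"); }
static void teardown_b(void) { puts("undo b"); }

static int init_all(void)
{
        int ret;

        ret = setup_a();
        if (ret)
                return ret;
        ret = setup_b();
        if (ret)
                goto undo_a;
        ret = setup_c();
        if (ret)
                goto undo_b;
        return 0;

undo_b:
        teardown_b();
undo_a:
        teardown_a();
        return ret;
}

int main(void) { return init_all() ? 1 : 0; }
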
index cc286ce97d1e92b87308ed6c037406f68f302cb9..f51963a8f929e97d8030ca1a1e228263094cf244 100644 (file)
@@ -53,7 +53,7 @@ static int test_btrfs_split_item(void)
                return -ENOMEM;
        }
 
-       path->nodes[0] = eb = alloc_dummy_extent_buffer(0, 4096);
+       path->nodes[0] = eb = alloc_dummy_extent_buffer(NULL, 4096);
        if (!eb) {
                test_msg("Could not allocate dummy buffer\n");
                ret = -ENOMEM;
index 7e99c2f98dd007984d5a5e16678cf42b1c2950c4..9e9f2368177d42114726fe4859ca938c8cd8079e 100644 (file)
@@ -258,8 +258,7 @@ static int test_find_delalloc(void)
        }
        ret = 0;
 out_bits:
-       clear_extent_bits(&tmp, 0, total_dirty - 1,
-                         (unsigned long)-1, GFP_NOFS);
+       clear_extent_bits(&tmp, 0, total_dirty - 1, (unsigned)-1, GFP_NOFS);
 out:
        if (locked_page)
                page_cache_release(locked_page);
index 3ae0f5b8bb80d619979919ff15d9241681f00e1a..a116b55ce7880a5e64837dfd6cd66f7412e27cec 100644 (file)
@@ -255,7 +255,7 @@ static noinline int test_btrfs_get_extent(void)
                goto out;
        }
 
-       root->node = alloc_dummy_extent_buffer(0, 4096);
+       root->node = alloc_dummy_extent_buffer(NULL, 4096);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                goto out;
@@ -843,7 +843,7 @@ static int test_hole_first(void)
                goto out;
        }
 
-       root->node = alloc_dummy_extent_buffer(0, 4096);
+       root->node = alloc_dummy_extent_buffer(NULL, 4096);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                goto out;
index ec3dcb20235774044e4b92e1230b1b0af593aea4..73f299ebdabb4389964fafeb7506bdd9716dea67 100644 (file)
@@ -404,12 +404,22 @@ int btrfs_test_qgroups(void)
                ret = -ENOMEM;
                goto out;
        }
+       /* We are using this root as our extent root */
+       root->fs_info->extent_root = root;
+
+       /*
+        * Some of the paths we test assume we have a filled out fs_info, so we
+        * just need to add the root in there so we don't panic.
+        */
+       root->fs_info->tree_root = root;
+       root->fs_info->quota_root = root;
+       root->fs_info->quota_enabled = 1;
 
        /*
         * Can't use bytenr 0, some things freak out
         * *cough*backref walking code*cough*
         */
-       root->node = alloc_test_extent_buffer(root->fs_info, 4096, 4096);
+       root->node = alloc_test_extent_buffer(root->fs_info, 4096);
        if (!root->node) {
                test_msg("Couldn't allocate dummy buffer\n");
                ret = -ENOMEM;
@@ -448,17 +458,6 @@ int btrfs_test_qgroups(void)
                goto out;
        }
 
-       /* We are using this root as our extent root */
-       root->fs_info->extent_root = root;
-
-       /*
-        * Some of the paths we test assume we have a filled out fs_info, so we
-        * just need to addt he root in there so we don't panic.
-        */
-       root->fs_info->tree_root = root;
-       root->fs_info->quota_root = root;
-       root->fs_info->quota_enabled = 1;
-
        test_msg("Running qgroup tests\n");
        ret = test_no_shared_qgroup(root);
        if (ret)
index e88b59d13439690f15810359ee7be343ad86b7a9..7e80f32550a663e7438d3e5fb8b1c37fcc391b64 100644 (file)
@@ -220,6 +220,7 @@ loop:
         * commit the transaction.
         */
        atomic_set(&cur_trans->use_count, 2);
+       cur_trans->have_free_bgs = 0;
        cur_trans->start_time = get_seconds();
 
        cur_trans->delayed_refs.href_root = RB_ROOT;
@@ -248,6 +249,8 @@ loop:
        INIT_LIST_HEAD(&cur_trans->pending_chunks);
        INIT_LIST_HEAD(&cur_trans->switch_commits);
        INIT_LIST_HEAD(&cur_trans->pending_ordered);
+       INIT_LIST_HEAD(&cur_trans->dirty_bgs);
+       spin_lock_init(&cur_trans->dirty_bgs_lock);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
                             fs_info->btree_inode->i_mapping);
@@ -1020,6 +1023,7 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_root *tree_root = root->fs_info->tree_root;
+       bool extent_root = (root->objectid == BTRFS_EXTENT_TREE_OBJECTID);
 
        old_root_used = btrfs_root_used(&root->root_item);
        btrfs_write_dirty_block_groups(trans, root);
@@ -1027,7 +1031,9 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
-                   old_root_used == btrfs_root_used(&root->root_item))
+                   old_root_used == btrfs_root_used(&root->root_item) &&
+                   (!extent_root ||
+                    list_empty(&trans->transaction->dirty_bgs)))
                        break;
 
                btrfs_set_root_node(&root->root_item, root->node);
@@ -1038,7 +1044,15 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
                        return ret;
 
                old_root_used = btrfs_root_used(&root->root_item);
-               ret = btrfs_write_dirty_block_groups(trans, root);
+               if (extent_root) {
+                       ret = btrfs_write_dirty_block_groups(trans, root);
+                       if (ret)
+                               return ret;
+               }
+               ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
+               if (ret)
+                       return ret;
+               ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
                if (ret)
                        return ret;
        }
@@ -1061,10 +1075,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
        struct extent_buffer *eb;
        int ret;
 
-       ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
-       if (ret)
-               return ret;
-
        eb = btrfs_lock_root_node(fs_info->tree_root);
        ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
                              0, &eb);
@@ -1097,6 +1107,7 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);
+               clear_bit(BTRFS_ROOT_DIRTY, &root->state);
 
                if (root != fs_info->extent_root)
                        list_add_tail(&root->dirty_list,
@@ -1983,6 +1994,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
        switch_commit_roots(cur_trans, root->fs_info);
 
        assert_qgroups_uptodate(trans);
+       ASSERT(list_empty(&cur_trans->dirty_bgs));
        update_super_roots(root);
 
        btrfs_set_super_log_root(root->fs_info->super_copy, 0);
@@ -2026,6 +2038,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
 
        btrfs_finish_extent_commit(trans, root);
 
+       if (cur_trans->have_free_bgs)
+               btrfs_clear_space_info_full(root->fs_info);
+
        root->fs_info->last_trans_committed = cur_trans->transid;
        /*
         * We needn't acquire the lock here because there is no other task
index 00ed29c4b3f9d0ee4bc10f3e2da2424a8642020b..937050a2b68edaf6bdd6027f23c6c6d37b8257d5 100644 (file)
@@ -47,6 +47,11 @@ struct btrfs_transaction {
        atomic_t num_writers;
        atomic_t use_count;
 
+       /*
+        * true if block groups were freed in this transaction
+        */
+       int have_free_bgs;
+
        /* Be protected by fs_info->trans_lock when we want to change it. */
        enum btrfs_trans_state state;
        struct list_head list;
@@ -58,6 +63,8 @@ struct btrfs_transaction {
        struct list_head pending_chunks;
        struct list_head pending_ordered;
        struct list_head switch_commits;
+       struct list_head dirty_bgs;
+       spinlock_t dirty_bgs_lock;
        struct btrfs_delayed_ref_root delayed_refs;
        int aborted;
 };
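
Note: the new dirty_bgs list collects block groups dirtied during the transaction, with dirty_bgs_lock serializing additions against the commit path that drains it (hence the ASSERT(list_empty(&cur_trans->dirty_bgs)) added earlier in this series). A kernel-style sketch of the same list-under-spinlock pattern; the demo_* names are illustrative and this is not the actual btrfs call site:

#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_transaction {
        struct list_head dirty_bgs;
        spinlock_t dirty_bgs_lock;
};

struct demo_block_group {
        struct list_head dirty_list;    /* INIT_LIST_HEAD()ed at creation;
                                           empty while the bg is clean */
};

static void demo_trans_init(struct demo_transaction *trans)
{
        INIT_LIST_HEAD(&trans->dirty_bgs);
        spin_lock_init(&trans->dirty_bgs_lock);
}

/* Writer side: queue a block group at most once per transaction. */
static void demo_mark_bg_dirty(struct demo_transaction *trans,
                               struct demo_block_group *bg)
{
        spin_lock(&trans->dirty_bgs_lock);
        if (list_empty(&bg->dirty_list))
                list_add_tail(&bg->dirty_list, &trans->dirty_bgs);
        spin_unlock(&trans->dirty_bgs_lock);
}

/* Commit side: drain the list under the same lock. */
static void demo_drain_dirty_bgs(struct demo_transaction *trans)
{
        struct demo_block_group *bg, *tmp;

        spin_lock(&trans->dirty_bgs_lock);
        list_for_each_entry_safe(bg, tmp, &trans->dirty_bgs, dirty_list)
                list_del_init(&bg->dirty_list);
        spin_unlock(&trans->dirty_bgs_lock);
}
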
index 1a9585d4380a330f96ba7d82f63cb87314d1701e..9a37f8b39bae9058a20aafe11c59793409115005 100644 (file)
@@ -453,11 +453,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans,
 insert:
        btrfs_release_path(path);
        /* try to insert the key into the destination tree */
+       path->skip_release_on_error = 1;
        ret = btrfs_insert_empty_item(trans, root, path,
                                      key, item_size);
+       path->skip_release_on_error = 0;
 
        /* make sure any existing item is the correct size */
-       if (ret == -EEXIST) {
+       if (ret == -EEXIST || ret == -EOVERFLOW) {
                u32 found_size;
                found_size = btrfs_item_size_nr(path->nodes[0],
                                                path->slots[0]);
@@ -488,8 +490,20 @@ insert:
                src_item = (struct btrfs_inode_item *)src_ptr;
                dst_item = (struct btrfs_inode_item *)dst_ptr;
 
-               if (btrfs_inode_generation(eb, src_item) == 0)
+               if (btrfs_inode_generation(eb, src_item) == 0) {
+                       struct extent_buffer *dst_eb = path->nodes[0];
+
+                       if (S_ISREG(btrfs_inode_mode(eb, src_item)) &&
+                           S_ISREG(btrfs_inode_mode(dst_eb, dst_item))) {
+                               struct btrfs_map_token token;
+                               u64 ino_size = btrfs_inode_size(eb, src_item);
+
+                               btrfs_init_map_token(&token);
+                               btrfs_set_token_inode_size(dst_eb, dst_item,
+                                                          ino_size, &token);
+                       }
                        goto no_copy;
+               }
 
                if (overwrite_root &&
                    S_ISDIR(btrfs_inode_mode(eb, src_item)) &&
@@ -844,7 +858,7 @@ out:
 static noinline int backref_in_log(struct btrfs_root *log,
                                   struct btrfs_key *key,
                                   u64 ref_objectid,
-                                  char *name, int namelen)
+                                  const char *name, int namelen)
 {
        struct btrfs_path *path;
        struct btrfs_inode_ref *ref;
@@ -1254,13 +1268,14 @@ out:
 }
 
 static int insert_orphan_item(struct btrfs_trans_handle *trans,
-                             struct btrfs_root *root, u64 offset)
+                             struct btrfs_root *root, u64 ino)
 {
        int ret;
-       ret = btrfs_find_item(root, NULL, BTRFS_ORPHAN_OBJECTID,
-                       offset, BTRFS_ORPHAN_ITEM_KEY, NULL);
-       if (ret > 0)
-               ret = btrfs_insert_orphan_item(trans, root, offset);
+
+       ret = btrfs_insert_orphan_item(trans, root, ino);
+       if (ret == -EEXIST)
+               ret = 0;
+
        return ret;
 }
 
@@ -1287,6 +1302,7 @@ static int count_inode_extrefs(struct btrfs_root *root,
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
+               cur_offset = 0;
 
                while (cur_offset < item_size) {
                        extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
@@ -1302,7 +1318,7 @@ static int count_inode_extrefs(struct btrfs_root *root,
        }
        btrfs_release_path(path);
 
-       if (ret < 0)
+       if (ret < 0 && ret != -ENOENT)
                return ret;
        return nlink;
 }
@@ -1394,9 +1410,6 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans,
        nlink = ret;
 
        ret = count_inode_extrefs(root, inode, path);
-       if (ret == -ENOENT)
-               ret = 0;
-
        if (ret < 0)
                goto out;
 
@@ -1556,6 +1569,30 @@ static noinline int insert_one_name(struct btrfs_trans_handle *trans,
        return ret;
 }
 
+/*
+ * Return true if an inode reference exists in the log for the given name,
+ * inode and parent inode.
+ */
+static bool name_in_log_ref(struct btrfs_root *log_root,
+                           const char *name, const int name_len,
+                           const u64 dirid, const u64 ino)
+{
+       struct btrfs_key search_key;
+
+       search_key.objectid = ino;
+       search_key.type = BTRFS_INODE_REF_KEY;
+       search_key.offset = dirid;
+       if (backref_in_log(log_root, &search_key, dirid, name, name_len))
+               return true;
+
+       search_key.type = BTRFS_INODE_EXTREF_KEY;
+       search_key.offset = btrfs_extref_hash(dirid, name, name_len);
+       if (backref_in_log(log_root, &search_key, dirid, name, name_len))
+               return true;
+
+       return false;
+}
+
 /*
  * take a single entry in a log directory item and replay it into
  * the subvolume.
@@ -1666,10 +1703,17 @@ out:
        return ret;
 
 insert:
+       if (name_in_log_ref(root->log_root, name, name_len,
+                           key->objectid, log_key.objectid)) {
+               /* The dentry will be added later. */
+               ret = 0;
+               update_size = false;
+               goto out;
+       }
        btrfs_release_path(path);
        ret = insert_one_name(trans, root, path, key->objectid, key->offset,
                              name, name_len, log_type, &log_key);
-       if (ret && ret != -ENOENT)
+       if (ret && ret != -ENOENT && ret != -EEXIST)
                goto out;
        update_size = false;
        ret = 0;
@@ -2164,7 +2208,7 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
                parent = path->nodes[*level];
                root_owner = btrfs_header_owner(parent);
 
-               next = btrfs_find_create_tree_block(root, bytenr, blocksize);
+               next = btrfs_find_create_tree_block(root, bytenr);
                if (!next)
                        return -ENOMEM;
 
@@ -2416,8 +2460,8 @@ static void wait_for_writer(struct btrfs_trans_handle *trans,
                mutex_unlock(&root->log_mutex);
                if (atomic_read(&root->log_writers))
                        schedule();
-               mutex_lock(&root->log_mutex);
                finish_wait(&root->log_writer_wait, &wait);
+               mutex_lock(&root->log_mutex);
        }
 }
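
Note: the two-line move above makes finish_wait() run before the log mutex is re-taken. finish_wait() is what restores TASK_RUNNING after the earlier prepare_to_wait(), so the most plausible reading is that the reorder avoids blocking on the mutex while the task is still marked TASK_UNINTERRUPTIBLE; the diff itself does not spell out the rationale. A sketch of the corrected wait-loop shape, with illustrative names:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/mutex.h>
#include <linux/atomic.h>

static void demo_wait_for_writers(wait_queue_head_t *wq, struct mutex *lock,
                                  atomic_t *writers)
{
        DEFINE_WAIT(wait);

        while (atomic_read(writers)) {
                prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                mutex_unlock(lock);
                if (atomic_read(writers))
                        schedule();
                finish_wait(wq, &wait); /* back to TASK_RUNNING ... */
                mutex_lock(lock);       /* ... before we may block again */
        }
}
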
 
@@ -3219,7 +3263,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
 static void fill_inode_item(struct btrfs_trans_handle *trans,
                            struct extent_buffer *leaf,
                            struct btrfs_inode_item *item,
-                           struct inode *inode, int log_inode_only)
+                           struct inode *inode, int log_inode_only,
+                           u64 logged_isize)
 {
        struct btrfs_map_token token;
 
@@ -3232,7 +3277,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
                 * to say 'update this inode with these values'
                 */
                btrfs_set_token_inode_generation(leaf, item, 0, &token);
-               btrfs_set_token_inode_size(leaf, item, 0, &token);
+               btrfs_set_token_inode_size(leaf, item, logged_isize, &token);
        } else {
                btrfs_set_token_inode_generation(leaf, item,
                                                 BTRFS_I(inode)->generation,
@@ -3245,19 +3290,19 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
        btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
        btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);
 
-       btrfs_set_token_timespec_sec(leaf, btrfs_inode_atime(item),
+       btrfs_set_token_timespec_sec(leaf, &item->atime,
                                     inode->i_atime.tv_sec, &token);
-       btrfs_set_token_timespec_nsec(leaf, btrfs_inode_atime(item),
+       btrfs_set_token_timespec_nsec(leaf, &item->atime,
                                      inode->i_atime.tv_nsec, &token);
 
-       btrfs_set_token_timespec_sec(leaf, btrfs_inode_mtime(item),
+       btrfs_set_token_timespec_sec(leaf, &item->mtime,
                                     inode->i_mtime.tv_sec, &token);
-       btrfs_set_token_timespec_nsec(leaf, btrfs_inode_mtime(item),
+       btrfs_set_token_timespec_nsec(leaf, &item->mtime,
                                      inode->i_mtime.tv_nsec, &token);
 
-       btrfs_set_token_timespec_sec(leaf, btrfs_inode_ctime(item),
+       btrfs_set_token_timespec_sec(leaf, &item->ctime,
                                     inode->i_ctime.tv_sec, &token);
-       btrfs_set_token_timespec_nsec(leaf, btrfs_inode_ctime(item),
+       btrfs_set_token_timespec_nsec(leaf, &item->ctime,
                                      inode->i_ctime.tv_nsec, &token);
 
        btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
@@ -3284,7 +3329,7 @@ static int log_inode_item(struct btrfs_trans_handle *trans,
                return ret;
        inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                    struct btrfs_inode_item);
-       fill_inode_item(trans, path->nodes[0], inode_item, inode, 0);
+       fill_inode_item(trans, path->nodes[0], inode_item, inode, 0, 0);
        btrfs_release_path(path);
        return 0;
 }
@@ -3293,7 +3338,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
                               struct inode *inode,
                               struct btrfs_path *dst_path,
                               struct btrfs_path *src_path, u64 *last_extent,
-                              int start_slot, int nr, int inode_only)
+                              int start_slot, int nr, int inode_only,
+                              u64 logged_isize)
 {
        unsigned long src_offset;
        unsigned long dst_offset;
@@ -3350,7 +3396,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
                                                    dst_path->slots[0],
                                                    struct btrfs_inode_item);
                        fill_inode_item(trans, dst_path->nodes[0], inode_item,
-                                       inode, inode_only == LOG_INODE_EXISTS);
+                                       inode, inode_only == LOG_INODE_EXISTS,
+                                       logged_isize);
                } else {
                        copy_extent_buffer(dst_path->nodes[0], src, dst_offset,
                                           src_offset, ins_sizes[i]);
@@ -3902,6 +3949,33 @@ process:
        return ret;
 }
 
+static int logged_inode_size(struct btrfs_root *log, struct inode *inode,
+                            struct btrfs_path *path, u64 *size_ret)
+{
+       struct btrfs_key key;
+       int ret;
+
+       key.objectid = btrfs_ino(inode);
+       key.type = BTRFS_INODE_ITEM_KEY;
+       key.offset = 0;
+
+       ret = btrfs_search_slot(NULL, log, &key, path, 0, 0);
+       if (ret < 0) {
+               return ret;
+       } else if (ret > 0) {
+               *size_ret = i_size_read(inode);
+       } else {
+               struct btrfs_inode_item *item;
+
+               item = btrfs_item_ptr(path->nodes[0], path->slots[0],
+                                     struct btrfs_inode_item);
+               *size_ret = btrfs_inode_size(path->nodes[0], item);
+       }
+
+       btrfs_release_path(path);
+       return 0;
+}
+
 /* log a single inode in the tree log.
  * At least one parent directory for this inode must exist in the tree
  * or be logged already.
@@ -3939,6 +4013,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        bool fast_search = false;
        u64 ino = btrfs_ino(inode);
        struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
+       u64 logged_isize = 0;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -3966,15 +4041,22 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
                max_key.type = (u8)-1;
        max_key.offset = (u64)-1;
 
-       /* Only run delayed items if we are a dir or a new file */
+       /*
+        * Only run delayed items if we are a dir or a new file.
+        * Otherwise commit the delayed inode only, which is needed in
+        * order for the log replay code to mark inodes for link count
+        * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
+        */
        if (S_ISDIR(inode->i_mode) ||
-           BTRFS_I(inode)->generation > root->fs_info->last_trans_committed) {
+           BTRFS_I(inode)->generation > root->fs_info->last_trans_committed)
                ret = btrfs_commit_inode_delayed_items(trans, inode);
-               if (ret) {
-                       btrfs_free_path(path);
-                       btrfs_free_path(dst_path);
-                       return ret;
-               }
+       else
+               ret = btrfs_commit_inode_delayed_inode(inode);
+
+       if (ret) {
+               btrfs_free_path(path);
+               btrfs_free_path(dst_path);
+               return ret;
        }
 
        mutex_lock(&BTRFS_I(inode)->log_mutex);
@@ -3988,22 +4070,56 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
        if (S_ISDIR(inode->i_mode)) {
                int max_key_type = BTRFS_DIR_LOG_INDEX_KEY;
 
-               if (inode_only == LOG_INODE_EXISTS)
-                       max_key_type = BTRFS_XATTR_ITEM_KEY;
+               if (inode_only == LOG_INODE_EXISTS) {
+                       max_key_type = BTRFS_INODE_EXTREF_KEY;
+                       max_key.type = max_key_type;
+               }
                ret = drop_objectid_items(trans, log, path, ino, max_key_type);
        } else {
-               if (test_and_clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
-                                      &BTRFS_I(inode)->runtime_flags)) {
-                       clear_bit(BTRFS_INODE_COPY_EVERYTHING,
-                                 &BTRFS_I(inode)->runtime_flags);
-                       ret = btrfs_truncate_inode_items(trans, log,
-                                                        inode, 0, 0);
-               } else if (test_and_clear_bit(BTRFS_INODE_COPY_EVERYTHING,
-                                             &BTRFS_I(inode)->runtime_flags) ||
+               if (inode_only == LOG_INODE_EXISTS) {
+                       /*
+                        * Make sure the new inode item we write to the log has
+                        * the same isize as the current one (if it exists).
+                        * This is necessary to prevent data loss after log
+                        * replay, and also to prevent doing a wrong expanding
+                        * truncate - for e.g. create file, write 4K into offset
+                        * 0, fsync, write 4K into offset 4096, add hard link,
+                        * fsync some other file (to sync log), power fail - if
+                        * we use the inode's current i_size, after log replay
+                        * we get an 8Kb file, with the last 4Kb extent as a hole
+                        * (zeroes), as if an expanding truncate happened,
+                        * instead of getting a file of 4Kb only.
+                        */
+                       err = logged_inode_size(log, inode, path,
+                                               &logged_isize);
+                       if (err)
+                               goto out_unlock;
+               }
+               if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                            &BTRFS_I(inode)->runtime_flags)) {
+                       if (inode_only == LOG_INODE_EXISTS) {
+                               max_key.type = BTRFS_INODE_EXTREF_KEY;
+                               ret = drop_objectid_items(trans, log, path, ino,
+                                                         max_key.type);
+                       } else {
+                               clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+                                         &BTRFS_I(inode)->runtime_flags);
+                               clear_bit(BTRFS_INODE_COPY_EVERYTHING,
+                                         &BTRFS_I(inode)->runtime_flags);
+                               ret = btrfs_truncate_inode_items(trans, log,
+                                                                inode, 0, 0);
+                       }
+               } else if (test_bit(BTRFS_INODE_COPY_EVERYTHING,
+                                   &BTRFS_I(inode)->runtime_flags) ||
                           inode_only == LOG_INODE_EXISTS) {
-                       if (inode_only == LOG_INODE_ALL)
+                       if (inode_only == LOG_INODE_ALL) {
+                               clear_bit(BTRFS_INODE_COPY_EVERYTHING,
+                                         &BTRFS_I(inode)->runtime_flags);
                                fast_search = true;
-                       max_key.type = BTRFS_XATTR_ITEM_KEY;
+                               max_key.type = BTRFS_XATTR_ITEM_KEY;
+                       } else {
+                               max_key.type = BTRFS_INODE_EXTREF_KEY;
+                       }
                        ret = drop_objectid_items(trans, log, path, ino,
                                                  max_key.type);
                } else {
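
Note: the long comment above describes the data-loss scenario that the logged_isize plumbing guards against. The same sequence expressed as plain POSIX calls (the /mnt paths are illustrative, error handling is omitted, and the way the hard link gets the inode into the log is simplified):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        int fd = open("/mnt/foo", O_CREAT | O_RDWR, 0644);
        int fd2;

        memset(buf, 0xaa, sizeof(buf));
        pwrite(fd, buf, sizeof(buf), 0);        /* 4K at offset 0 */
        fsync(fd);                              /* foo logged with i_size 4096 */
        pwrite(fd, buf, sizeof(buf), 4096);     /* grow to 8K, not fsynced */
        link("/mnt/foo", "/mnt/bar");           /* add a hard link */
        fd2 = open("/mnt/other", O_CREAT | O_RDWR, 0644);
        fsync(fd2);                             /* syncs the log; power fails here */
        /*
         * After log replay foo must still be 4K.  Logging its current 8K
         * i_size would leave the last 4K as a bogus hole, which is what
         * logged_inode_size() prevents.
         */
        close(fd);
        close(fd2);
        return 0;
}
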
@@ -4047,7 +4163,8 @@ again:
                }
 
                ret = copy_items(trans, inode, dst_path, path, &last_extent,
-                                ins_start_slot, ins_nr, inode_only);
+                                ins_start_slot, ins_nr, inode_only,
+                                logged_isize);
                if (ret < 0) {
                        err = ret;
                        goto out_unlock;
@@ -4071,7 +4188,7 @@ next_slot:
                if (ins_nr) {
                        ret = copy_items(trans, inode, dst_path, path,
                                         &last_extent, ins_start_slot,
-                                        ins_nr, inode_only);
+                                        ins_nr, inode_only, logged_isize);
                        if (ret < 0) {
                                err = ret;
                                goto out_unlock;
@@ -4092,7 +4209,8 @@ next_slot:
        }
        if (ins_nr) {
                ret = copy_items(trans, inode, dst_path, path, &last_extent,
-                                ins_start_slot, ins_nr, inode_only);
+                                ins_start_slot, ins_nr, inode_only,
+                                logged_isize);
                if (ret < 0) {
                        err = ret;
                        goto out_unlock;
@@ -4273,6 +4391,9 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
        struct dentry *old_parent = NULL;
        int ret = 0;
        u64 last_committed = root->fs_info->last_trans_committed;
+       const struct dentry * const first_parent = parent;
+       const bool did_unlink = (BTRFS_I(inode)->last_unlink_trans >
+                                last_committed);
 
        sb = inode->i_sb;
 
@@ -4328,7 +4449,6 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
                goto end_trans;
        }
 
-       inode_only = LOG_INODE_EXISTS;
        while (1) {
                if (!parent || !parent->d_inode || sb != parent->d_inode->i_sb)
                        break;
@@ -4337,8 +4457,22 @@ static int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
                if (root != BTRFS_I(inode)->root)
                        break;
 
+               /*
+                * On unlink we must make sure our immediate parent directory
+                * inode is fully logged. This is to prevent leaving dangling
+                * directory index entries and a wrong directory inode's i_size.
+                * Not doing so can result in a directory being impossible to
+                * delete after log replay (rmdir will always fail with error
+                * -ENOTEMPTY).
+                */
+               if (did_unlink && parent == first_parent)
+                       inode_only = LOG_INODE_ALL;
+               else
+                       inode_only = LOG_INODE_EXISTS;
+
                if (BTRFS_I(inode)->generation >
-                   root->fs_info->last_trans_committed) {
+                   root->fs_info->last_trans_committed ||
+                   inode_only == LOG_INODE_ALL) {
                        ret = btrfs_log_inode(trans, root, inode, inode_only,
                                              0, LLONG_MAX, ctx);
                        if (ret)
index 50c5a8762aedfc7bf5be640b96b3eb4622b58f88..cd4d1315aaa92d43476a9e86c521e94a04519d0e 100644 (file)
@@ -1310,6 +1310,8 @@ again:
        if (ret) {
                btrfs_error(root->fs_info, ret,
                            "Failed to remove dev extent item");
+       } else {
+               trans->transaction->have_free_bgs = 1;
        }
 out:
        btrfs_free_path(path);
@@ -4196,7 +4198,7 @@ static u32 find_raid56_stripe_len(u32 data_devices, u32 dev_stripe_target)
 
 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
 {
-       if (!(type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)))
+       if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
                return;
 
        btrfs_set_fs_incompat(info, RAID56);
@@ -4803,10 +4805,8 @@ unsigned long btrfs_full_stripe_len(struct btrfs_root *root,
 
        BUG_ON(em->start > logical || em->start + em->len < logical);
        map = (struct map_lookup *)em->bdev;
-       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                        BTRFS_BLOCK_GROUP_RAID6)) {
+       if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
                len = map->stripe_len * nr_data_stripes(map);
-       }
        free_extent_map(em);
        return len;
 }
@@ -4826,8 +4826,7 @@ int btrfs_is_parity_mirror(struct btrfs_mapping_tree *map_tree,
 
        BUG_ON(em->start > logical || em->start + em->len < logical);
        map = (struct map_lookup *)em->bdev;
-       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                        BTRFS_BLOCK_GROUP_RAID6))
+       if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
                ret = 1;
        free_extent_map(em);
        return ret;
@@ -4876,32 +4875,24 @@ static inline int parity_smaller(u64 a, u64 b)
 }
 
 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
-static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
+static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
 {
        struct btrfs_bio_stripe s;
-       int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
        int i;
        u64 l;
        int again = 1;
-       int m;
 
        while (again) {
                again = 0;
-               for (i = 0; i < real_stripes - 1; i++) {
-                       if (parity_smaller(raid_map[i], raid_map[i+1])) {
+               for (i = 0; i < num_stripes - 1; i++) {
+                       if (parity_smaller(bbio->raid_map[i],
+                                          bbio->raid_map[i+1])) {
                                s = bbio->stripes[i];
-                               l = raid_map[i];
+                               l = bbio->raid_map[i];
                                bbio->stripes[i] = bbio->stripes[i+1];
-                               raid_map[i] = raid_map[i+1];
+                               bbio->raid_map[i] = bbio->raid_map[i+1];
                                bbio->stripes[i+1] = s;
-                               raid_map[i+1] = l;
-
-                               if (bbio->tgtdev_map) {
-                                       m = bbio->tgtdev_map[i];
-                                       bbio->tgtdev_map[i] =
-                                                       bbio->tgtdev_map[i + 1];
-                                       bbio->tgtdev_map[i + 1] = m;
-                               }
+                               bbio->raid_map[i+1] = l;
 
                                again = 1;
                        }
@@ -4909,10 +4900,41 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
        }
 }
 
+static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
+{
+       struct btrfs_bio *bbio = kzalloc(
+               sizeof(struct btrfs_bio) +
+               sizeof(struct btrfs_bio_stripe) * (total_stripes) +
+               sizeof(int) * (real_stripes) +
+               sizeof(u64) * (real_stripes),
+               GFP_NOFS);
+       if (!bbio)
+               return NULL;
+
+       atomic_set(&bbio->error, 0);
+       atomic_set(&bbio->refs, 1);
+
+       return bbio;
+}
+
+void btrfs_get_bbio(struct btrfs_bio *bbio)
+{
+       WARN_ON(!atomic_read(&bbio->refs));
+       atomic_inc(&bbio->refs);
+}
+
+void btrfs_put_bbio(struct btrfs_bio *bbio)
+{
+       if (!bbio)
+               return;
+       if (atomic_dec_and_test(&bbio->refs))
+               kfree(bbio);
+}
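
Note: alloc_btrfs_bio() above sizes a single allocation to hold the btrfs_bio header, the stripe array, the tgtdev index array and the raid_map, and the get/put pair replaces the bare kfree() calls seen throughout this diff. A user-space sketch of the same single-allocation layout and get/put lifecycle with simplified types (the kernel uses atomic_t for the refcount; this is not the real btrfs structure):

#include <stdlib.h>

struct demo_stripe { unsigned long long physical; };

struct demo_bbio {
        int refs;                       /* the kernel uses atomic_t */
        int num_stripes;
        unsigned long long *raid_map;   /* points into the same allocation */
        struct demo_stripe stripes[];   /* flexible array, like bbio->stripes */
};

static struct demo_bbio *demo_alloc_bbio(int total_stripes, int real_stripes)
{
        size_t sz = sizeof(struct demo_bbio)
                  + sizeof(struct demo_stripe) * total_stripes
                  + sizeof(int) * real_stripes                  /* tgtdev_map */
                  + sizeof(unsigned long long) * real_stripes;  /* raid_map */
        struct demo_bbio *bbio = calloc(1, sz);

        if (!bbio)
                return NULL;
        bbio->refs = 1;
        bbio->num_stripes = total_stripes;
        /* raid_map sits after the stripe and tgtdev arrays, as in the diff */
        bbio->raid_map = (unsigned long long *)
                ((char *)(bbio->stripes + total_stripes) +
                 sizeof(int) * real_stripes);
        return bbio;
}

static void demo_get_bbio(struct demo_bbio *bbio)
{
        bbio->refs++;
}

static void demo_put_bbio(struct demo_bbio *bbio)
{
        if (bbio && --bbio->refs == 0)
                free(bbio);
}
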
+
 static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                             u64 logical, u64 *length,
                             struct btrfs_bio **bbio_ret,
-                            int mirror_num, u64 **raid_map_ret)
+                            int mirror_num, int need_raid_map)
 {
        struct extent_map *em;
        struct map_lookup *map;
@@ -4925,7 +4947,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        u64 stripe_nr_orig;
        u64 stripe_nr_end;
        u64 stripe_len;
-       u64 *raid_map = NULL;
        int stripe_index;
        int i;
        int ret = 0;
@@ -4976,7 +4997,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        stripe_offset = offset - stripe_offset;
 
        /* if we're here for raid56, we need to know the stripe aligned start */
-       if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+       if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                unsigned long full_stripe_len = stripe_len * nr_data_stripes(map);
                raid56_full_stripe_start = offset;
 
@@ -4989,8 +5010,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
 
        if (rw & REQ_DISCARD) {
                /* we don't discard raid56 yet */
-               if (map->type &
-                   (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6)) {
+               if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                        ret = -EOPNOTSUPP;
                        goto out;
                }
@@ -5000,7 +5020,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                /* For writes to RAID[56], allow a full stripeset across all disks.
                   For other RAID types and for RAID[56] reads, just allow a single
                   stripe (on a single disk). */
-               if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6) &&
+               if ((map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
                    (rw & REQ_WRITE)) {
                        max_len = stripe_len * nr_data_stripes(map) -
                                (offset - raid56_full_stripe_start);
@@ -5047,7 +5067,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                u64 physical_of_found = 0;
 
                ret = __btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS,
-                            logical, &tmp_length, &tmp_bbio, 0, NULL);
+                            logical, &tmp_length, &tmp_bbio, 0, 0);
                if (ret) {
                        WARN_ON(tmp_bbio != NULL);
                        goto out;
@@ -5061,7 +5081,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                         * is not left of the left cursor
                         */
                        ret = -EIO;
-                       kfree(tmp_bbio);
+                       btrfs_put_bbio(tmp_bbio);
                        goto out;
                }
 
@@ -5096,11 +5116,11 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                } else {
                        WARN_ON(1);
                        ret = -EIO;
-                       kfree(tmp_bbio);
+                       btrfs_put_bbio(tmp_bbio);
                        goto out;
                }
 
-               kfree(tmp_bbio);
+               btrfs_put_bbio(tmp_bbio);
        } else if (mirror_num > map->num_stripes) {
                mirror_num = 0;
        }
@@ -5166,15 +5186,10 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                        mirror_num = stripe_index - old_stripe_index + 1;
                }
 
-       } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                               BTRFS_BLOCK_GROUP_RAID6)) {
-               u64 tmp;
-
-               if (raid_map_ret &&
+       } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
+               if (need_raid_map &&
                    ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
                     mirror_num > 1)) {
-                       int i, rot;
-
                        /* push stripe_nr back to the start of the full stripe */
                        stripe_nr = raid56_full_stripe_start;
                        do_div(stripe_nr, stripe_len * nr_data_stripes(map));
@@ -5183,32 +5198,12 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                        num_stripes = map->num_stripes;
                        max_errors = nr_parity_stripes(map);
 
-                       raid_map = kmalloc_array(num_stripes, sizeof(u64),
-                                          GFP_NOFS);
-                       if (!raid_map) {
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-
-                       /* Work out the disk rotation on this stripe-set */
-                       tmp = stripe_nr;
-                       rot = do_div(tmp, num_stripes);
-
-                       /* Fill in the logical address of each stripe */
-                       tmp = stripe_nr * nr_data_stripes(map);
-                       for (i = 0; i < nr_data_stripes(map); i++)
-                               raid_map[(i+rot) % num_stripes] =
-                                       em->start + (tmp + i) * map->stripe_len;
-
-                       raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
-                       if (map->type & BTRFS_BLOCK_GROUP_RAID6)
-                               raid_map[(i+rot+1) % num_stripes] =
-                                       RAID6_Q_STRIPE;
-
                        *length = map->stripe_len;
                        stripe_index = 0;
                        stripe_offset = 0;
                } else {
+                       u64 tmp;
+
                        /*
                         * Mirror #0 or #1 means the original data block.
                         * Mirror #2 is RAID5 parity block.
@@ -5246,17 +5241,42 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                tgtdev_indexes = num_stripes;
        }
 
-       bbio = kzalloc(btrfs_bio_size(num_alloc_stripes, tgtdev_indexes),
-                      GFP_NOFS);
+       bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
        if (!bbio) {
-               kfree(raid_map);
                ret = -ENOMEM;
                goto out;
        }
-       atomic_set(&bbio->error, 0);
        if (dev_replace_is_ongoing)
                bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
 
+       /* build raid_map */
+       if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK &&
+           need_raid_map && ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
+           mirror_num > 1)) {
+               u64 tmp;
+               int i, rot;
+
+               bbio->raid_map = (u64 *)((void *)bbio->stripes +
+                                sizeof(struct btrfs_bio_stripe) *
+                                num_alloc_stripes +
+                                sizeof(int) * tgtdev_indexes);
+
+               /* Work out the disk rotation on this stripe-set */
+               tmp = stripe_nr;
+               rot = do_div(tmp, num_stripes);
+
+               /* Fill in the logical address of each stripe */
+               tmp = stripe_nr * nr_data_stripes(map);
+               for (i = 0; i < nr_data_stripes(map); i++)
+                       bbio->raid_map[(i+rot) % num_stripes] =
+                               em->start + (tmp + i) * map->stripe_len;
+
+               bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
+               if (map->type & BTRFS_BLOCK_GROUP_RAID6)
+                       bbio->raid_map[(i+rot+1) % num_stripes] =
+                               RAID6_Q_STRIPE;
+       }
+
        if (rw & REQ_DISCARD) {
                int factor = 0;
                int sub_stripes = 0;
@@ -5340,6 +5360,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
                max_errors = btrfs_chunk_max_errors(map);
 
+       if (bbio->raid_map)
+               sort_parity_stripes(bbio, num_stripes);
+
        tgtdev_indexes = 0;
        if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
            dev_replace->tgtdev != NULL) {
@@ -5427,6 +5450,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
        }
 
        *bbio_ret = bbio;
+       bbio->map_type = map->type;
        bbio->num_stripes = num_stripes;
        bbio->max_errors = max_errors;
        bbio->mirror_num = mirror_num;
@@ -5443,10 +5467,6 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
                bbio->mirror_num = map->num_stripes + 1;
        }
-       if (raid_map) {
-               sort_parity_stripes(bbio, raid_map);
-               *raid_map_ret = raid_map;
-       }
 out:
        if (dev_replace_is_ongoing)
                btrfs_dev_replace_unlock(dev_replace);
@@ -5459,17 +5479,17 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                      struct btrfs_bio **bbio_ret, int mirror_num)
 {
        return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
-                                mirror_num, NULL);
+                                mirror_num, 0);
 }
 
 /* For Scrub/replace */
 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
                     u64 logical, u64 *length,
                     struct btrfs_bio **bbio_ret, int mirror_num,
-                    u64 **raid_map_ret)
+                    int need_raid_map)
 {
        return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
-                                mirror_num, raid_map_ret);
+                                mirror_num, need_raid_map);
 }
 
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
@@ -5511,8 +5531,7 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
                do_div(length, map->num_stripes / map->sub_stripes);
        else if (map->type & BTRFS_BLOCK_GROUP_RAID0)
                do_div(length, map->num_stripes);
-       else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
-                             BTRFS_BLOCK_GROUP_RAID6)) {
+       else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
                do_div(length, nr_data_stripes(map));
                rmap_len = map->stripe_len * nr_data_stripes(map);
        }
@@ -5565,7 +5584,7 @@ static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int e
                bio_endio_nodec(bio, err);
        else
                bio_endio(bio, err);
-       kfree(bbio);
+       btrfs_put_bbio(bbio);
 }
 
 static void btrfs_end_bio(struct bio *bio, int err)
@@ -5808,7 +5827,6 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
        u64 logical = (u64)bio->bi_iter.bi_sector << 9;
        u64 length = 0;
        u64 map_length;
-       u64 *raid_map = NULL;
        int ret;
        int dev_nr = 0;
        int total_devs = 1;
@@ -5819,7 +5837,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 
        btrfs_bio_counter_inc_blocked(root->fs_info);
        ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
-                             mirror_num, &raid_map);
+                             mirror_num, 1);
        if (ret) {
                btrfs_bio_counter_dec(root->fs_info);
                return ret;
@@ -5832,15 +5850,13 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
        bbio->fs_info = root->fs_info;
        atomic_set(&bbio->stripes_pending, bbio->num_stripes);
 
-       if (raid_map) {
+       if (bbio->raid_map) {
                /* In this case, map_length has been set to the length of
                   a single stripe; not the whole write */
                if (rw & WRITE) {
-                       ret = raid56_parity_write(root, bio, bbio,
-                                                 raid_map, map_length);
+                       ret = raid56_parity_write(root, bio, bbio, map_length);
                } else {
-                       ret = raid56_parity_recover(root, bio, bbio,
-                                                   raid_map, map_length,
+                       ret = raid56_parity_recover(root, bio, bbio, map_length,
                                                    mirror_num, 1);
                }
 
@@ -6238,17 +6254,22 @@ int btrfs_read_sys_array(struct btrfs_root *root)
        struct extent_buffer *sb;
        struct btrfs_disk_key *disk_key;
        struct btrfs_chunk *chunk;
-       u8 *ptr;
-       unsigned long sb_ptr;
+       u8 *array_ptr;
+       unsigned long sb_array_offset;
        int ret = 0;
        u32 num_stripes;
        u32 array_size;
        u32 len = 0;
-       u32 cur;
+       u32 cur_offset;
        struct btrfs_key key;
 
-       sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET,
-                                         BTRFS_SUPER_INFO_SIZE);
+       ASSERT(BTRFS_SUPER_INFO_SIZE <= root->nodesize);
+       /*
+        * This will create an extent buffer of nodesize; the superblock
+        * size is fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size,
+        * this overallocates, but that's fine since only the first page
+        * is used.
+        */
+       sb = btrfs_find_create_tree_block(root, BTRFS_SUPER_INFO_OFFSET);
        if (!sb)
                return -ENOMEM;
        btrfs_set_buffer_uptodate(sb);
@@ -6271,35 +6292,56 @@ int btrfs_read_sys_array(struct btrfs_root *root)
        write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
        array_size = btrfs_super_sys_array_size(super_copy);
 
-       ptr = super_copy->sys_chunk_array;
-       sb_ptr = offsetof(struct btrfs_super_block, sys_chunk_array);
-       cur = 0;
+       array_ptr = super_copy->sys_chunk_array;
+       sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
+       cur_offset = 0;
+
+       while (cur_offset < array_size) {
+               disk_key = (struct btrfs_disk_key *)array_ptr;
+               len = sizeof(*disk_key);
+               if (cur_offset + len > array_size)
+                       goto out_short_read;
 
-       while (cur < array_size) {
-               disk_key = (struct btrfs_disk_key *)ptr;
                btrfs_disk_key_to_cpu(&key, disk_key);
 
-               len = sizeof(*disk_key); ptr += len;
-               sb_ptr += len;
-               cur += len;
+               array_ptr += len;
+               sb_array_offset += len;
+               cur_offset += len;
 
                if (key.type == BTRFS_CHUNK_ITEM_KEY) {
-                       chunk = (struct btrfs_chunk *)sb_ptr;
+                       chunk = (struct btrfs_chunk *)sb_array_offset;
+                       /*
+                        * At least one btrfs_chunk with one stripe must be
+                        * present; the exact stripe count check comes afterwards.
+                        */
+                       len = btrfs_chunk_item_size(1);
+                       if (cur_offset + len > array_size)
+                               goto out_short_read;
+
+                       num_stripes = btrfs_chunk_num_stripes(sb, chunk);
+                       len = btrfs_chunk_item_size(num_stripes);
+                       if (cur_offset + len > array_size)
+                               goto out_short_read;
+
                        ret = read_one_chunk(root, &key, sb, chunk);
                        if (ret)
                                break;
-                       num_stripes = btrfs_chunk_num_stripes(sb, chunk);
-                       len = btrfs_chunk_item_size(num_stripes);
                } else {
                        ret = -EIO;
                        break;
                }
-               ptr += len;
-               sb_ptr += len;
-               cur += len;
+               array_ptr += len;
+               sb_array_offset += len;
+               cur_offset += len;
        }
        free_extent_buffer(sb);
        return ret;
+
+out_short_read:
+       printk(KERN_ERR "BTRFS: sys_array too short to read %u bytes at offset %u\n",
+                       len, cur_offset);
+       free_extent_buffer(sb);
+       return -EIO;
 }
 
 int btrfs_read_chunk_tree(struct btrfs_root *root)
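For context on the hunks above: __btrfs_map_block() now gets its btrfs_bio from alloc_btrfs_bio() instead of an open-coded kzalloc(), and the raid_map lives inside that single allocation, right after the stripe and tgtdev arrays. The helper's body is not part of this excerpt, so the following is only a hedged sketch of a layout-compatible implementation; the field initialisation mirrors the atomic_set() this hunk removes from the caller.

static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
{
	struct btrfs_bio *bbio = kzalloc(
		/* the old btrfs_bio_size() components ... */
		sizeof(struct btrfs_bio) +
		sizeof(struct btrfs_bio_stripe) * total_stripes +
		sizeof(int) * real_stripes +
		/* ... plus room for the embedded raid_map */
		sizeof(u64) * total_stripes,
		GFP_NOFS);

	if (!bbio)
		return NULL;

	atomic_set(&bbio->error, 0);	/* replaces the caller-side init removed above */
	atomic_set(&bbio->refs, 1);	/* dropped by btrfs_put_bbio() */
	return bbio;
}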
index d6fe73c0f4a2008604e7de74419af9801b80d1e4..83069dec6898a1a024baa28b98dca3174a3959c0 100644 (file)
@@ -295,8 +295,10 @@ typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
 #define BTRFS_BIO_ORIG_BIO_SUBMITTED   (1 << 0)
 
 struct btrfs_bio {
+       atomic_t refs;
        atomic_t stripes_pending;
        struct btrfs_fs_info *fs_info;
+       u64 map_type; /* get from map_lookup->type */
        bio_end_io_t *end_io;
        struct bio *orig_bio;
        unsigned long flags;
@@ -307,6 +309,12 @@ struct btrfs_bio {
        int mirror_num;
        int num_tgtdevs;
        int *tgtdev_map;
+       /*
+        * Logical block numbers for the start of each stripe.
+        * The last one or two are p/q. These are sorted, so
+        * raid_map[0] is the start of our full stripe.
+        */
+       u64 *raid_map;
        struct btrfs_bio_stripe stripes[];
 };
 
@@ -388,19 +396,15 @@ struct btrfs_balance_control {
 
 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
                                   u64 end, u64 *length);
-
-#define btrfs_bio_size(total_stripes, real_stripes)            \
-       (sizeof(struct btrfs_bio) +                             \
-        (sizeof(struct btrfs_bio_stripe) * (total_stripes)) +  \
-        (sizeof(int) * (real_stripes)))
-
+void btrfs_get_bbio(struct btrfs_bio *bbio);
+void btrfs_put_bbio(struct btrfs_bio *bbio);
 int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
                    u64 logical, u64 *length,
                    struct btrfs_bio **bbio_ret, int mirror_num);
 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
                     u64 logical, u64 *length,
                     struct btrfs_bio **bbio_ret, int mirror_num,
-                    u64 **raid_map_ret);
+                    int need_raid_map);
 int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
                     u64 chunk_start, u64 physical, u64 devid,
                     u64 **logical, int *naddrs, int *stripe_len);
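The header change above adds a 'refs' count to struct btrfs_bio and drops the btrfs_bio_size() macro in favour of btrfs_get_bbio()/btrfs_put_bbio(), which is why every kfree(bbio) in the earlier hunks becomes btrfs_put_bbio(). The bodies of the two helpers are not shown in this excerpt; a minimal refcounting pair consistent with these declarations would look roughly like this (a sketch, not necessarily the patch's exact code):

void btrfs_get_bbio(struct btrfs_bio *bbio)
{
	WARN_ON(!atomic_read(&bbio->refs));
	atomic_inc(&bbio->refs);
}

void btrfs_put_bbio(struct btrfs_bio *bbio)
{
	if (!bbio)
		return;
	if (atomic_dec_and_test(&bbio->refs))
		kfree(bbio);	/* stripes, tgtdev map and raid_map share this one allocation */
}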
index ce1b115dcc28bc2968009e5e8182004da99cf29e..f601def05bdf00663f5b1666514e1810068e1b19 100644 (file)
@@ -574,7 +574,7 @@ static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
        /* extract the directory dentry from the cwd */
        get_fs_pwd(current->fs, &path);
 
-       if (!S_ISDIR(path.dentry->d_inode->i_mode))
+       if (!d_can_lookup(path.dentry))
                goto notdir;
 
        cachefiles_begin_secure(cache, &saved_cred);
@@ -646,7 +646,7 @@ static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
        /* extract the directory dentry from the cwd */
        get_fs_pwd(current->fs, &path);
 
-       if (!S_ISDIR(path.dentry->d_inode->i_mode))
+       if (!d_can_lookup(path.dentry))
                goto notdir;
 
        cachefiles_begin_secure(cache, &saved_cred);
index 1c7293c3a93ae935c5be8cdfc149fcd3f84e0dad..232426214fdd1849b21a4e3c080563e2132e8bd2 100644 (file)
@@ -437,7 +437,7 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
        if (!object->backer)
                return -ENOBUFS;
 
-       ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+       ASSERT(d_is_reg(object->backer));
 
        fscache_set_store_limit(&object->fscache, ni_size);
 
@@ -501,7 +501,7 @@ static void cachefiles_invalidate_object(struct fscache_operation *op)
               op->object->debug_id, (unsigned long long)ni_size);
 
        if (object->backer) {
-               ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+               ASSERT(d_is_reg(object->backer));
 
                fscache_set_store_limit(&object->fscache, ni_size);
 
index 7f8e83f9d74eb87712db9e8fdc7da95fcccaefdb..1e51714eb33e15f5e6624699021d26d95e1c16aa 100644 (file)
@@ -277,7 +277,7 @@ static int cachefiles_bury_object(struct cachefiles_cache *cache,
        _debug("remove %p from %p", rep, dir);
 
        /* non-directories can just be unlinked */
-       if (!S_ISDIR(rep->d_inode->i_mode)) {
+       if (!d_is_dir(rep)) {
                _debug("unlink stale object");
 
                path.mnt = cache->mnt;
@@ -323,7 +323,7 @@ try_again:
                return 0;
        }
 
-       if (!S_ISDIR(cache->graveyard->d_inode->i_mode)) {
+       if (!d_can_lookup(cache->graveyard)) {
                unlock_rename(cache->graveyard, dir);
                cachefiles_io_error(cache, "Graveyard no longer a directory");
                return -EIO;
@@ -475,7 +475,7 @@ int cachefiles_walk_to_object(struct cachefiles_object *parent,
        ASSERT(parent->dentry);
        ASSERT(parent->dentry->d_inode);
 
-       if (!(S_ISDIR(parent->dentry->d_inode->i_mode))) {
+       if (!(d_is_dir(parent->dentry))) {
                // TODO: convert file to dir
                _leave("looking up in none directory");
                return -ENOBUFS;
@@ -539,7 +539,7 @@ lookup_again:
                        _debug("mkdir -> %p{%p{ino=%lu}}",
                               next, next->d_inode, next->d_inode->i_ino);
 
-               } else if (!S_ISDIR(next->d_inode->i_mode)) {
+               } else if (!d_can_lookup(next)) {
                        pr_err("inode %lu is not a directory\n",
                               next->d_inode->i_ino);
                        ret = -ENOBUFS;
@@ -568,8 +568,8 @@ lookup_again:
                        _debug("create -> %p{%p{ino=%lu}}",
                               next, next->d_inode, next->d_inode->i_ino);
 
-               } else if (!S_ISDIR(next->d_inode->i_mode) &&
-                          !S_ISREG(next->d_inode->i_mode)
+               } else if (!d_can_lookup(next) &&
+                          !d_is_reg(next)
                           ) {
                        pr_err("inode %lu is not a file or directory\n",
                               next->d_inode->i_ino);
@@ -642,7 +642,7 @@ lookup_again:
 
        /* open a file interface onto a data file */
        if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
-               if (S_ISREG(object->dentry->d_inode->i_mode)) {
+               if (d_is_reg(object->dentry)) {
                        const struct address_space_operations *aops;
 
                        ret = -EPERM;
@@ -763,7 +763,7 @@ struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
        /* we need to make sure the subdir is a directory */
        ASSERT(subdir->d_inode);
 
-       if (!S_ISDIR(subdir->d_inode->i_mode)) {
+       if (!d_can_lookup(subdir)) {
                pr_err("%s is not a directory\n", dirname);
                ret = -EIO;
                goto check_error;
index 616db0e77b44bd8481782047829dfd06605700ae..c6cd8d7a4eef91b379efc75888a94eba6345ac33 100644 (file)
@@ -900,7 +900,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
                return -ENOBUFS;
        }
 
-       ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+       ASSERT(d_is_reg(object->backer));
 
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);
index 5bd853ba44ffccfc39b45b511e6f3b31bd3c1e37..64fa248343f65461db232ee4ae0939beff0fc05c 100644 (file)
@@ -40,20 +40,6 @@ static inline void ceph_set_cached_acl(struct inode *inode,
        spin_unlock(&ci->i_ceph_lock);
 }
 
-static inline struct posix_acl *ceph_get_cached_acl(struct inode *inode,
-                                                       int type)
-{
-       struct ceph_inode_info *ci = ceph_inode(inode);
-       struct posix_acl *acl = ACL_NOT_CACHED;
-
-       spin_lock(&ci->i_ceph_lock);
-       if (__ceph_caps_issued_mask(ci, CEPH_CAP_XATTR_SHARED, 0))
-               acl = get_cached_acl(inode, type);
-       spin_unlock(&ci->i_ceph_lock);
-
-       return acl;
-}
-
 struct posix_acl *ceph_get_acl(struct inode *inode, int type)
 {
        int size;
index 24be059fd1f8e073b7ee1d250b20f0bcdab16da0..fd5599d323620a2c5617ea5355e2e1320d6a0954 100644 (file)
@@ -196,17 +196,22 @@ static int readpage_nounlock(struct file *filp, struct page *page)
        u64 len = PAGE_CACHE_SIZE;
 
        if (off >= i_size_read(inode)) {
-               zero_user_segment(page, err, PAGE_CACHE_SIZE);
+               zero_user_segment(page, 0, PAGE_CACHE_SIZE);
                SetPageUptodate(page);
                return 0;
        }
 
-       /*
-        * Uptodate inline data should have been added into page cache
-        * while getting Fcr caps.
-        */
-       if (ci->i_inline_version != CEPH_INLINE_NONE)
-               return -EINVAL;
+       if (ci->i_inline_version != CEPH_INLINE_NONE) {
+               /*
+                * Uptodate inline data should have been added
+                * into page cache while getting Fcr caps.
+                */
+               if (off == 0)
+                       return -EINVAL;
+               zero_user_segment(page, 0, PAGE_CACHE_SIZE);
+               SetPageUptodate(page);
+               return 0;
+       }
 
        err = ceph_readpage_from_fscache(inode, page);
        if (err == 0)
index b93c631c6c87d550e1f9674aaec5d41906d12143..8172775428a0b9165e68d293d23812a3f2be1593 100644 (file)
@@ -577,7 +577,6 @@ void ceph_add_cap(struct inode *inode,
                struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
                                                               realmino);
                if (realm) {
-                       ceph_get_snap_realm(mdsc, realm);
                        spin_lock(&realm->inodes_with_caps_lock);
                        ci->i_snap_realm = realm;
                        list_add(&ci->i_snap_realm_item,
@@ -1451,8 +1450,8 @@ static int __mark_caps_flushing(struct inode *inode,
        spin_lock(&mdsc->cap_dirty_lock);
        list_del_init(&ci->i_dirty_item);
 
-       ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
        if (list_empty(&ci->i_flushing_item)) {
+               ci->i_cap_flush_seq = ++mdsc->cap_flush_seq;
                list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
                mdsc->num_cap_flushing++;
                dout(" inode %p now flushing seq %lld\n", inode,
@@ -2073,17 +2072,16 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got)
  * requested from the MDS.
  */
 static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
-                           loff_t endoff, int *got, struct page **pinned_page,
-                           int *check_max, int *err)
+                           loff_t endoff, int *got, int *check_max, int *err)
 {
        struct inode *inode = &ci->vfs_inode;
        int ret = 0;
-       int have, implemented, _got = 0;
+       int have, implemented;
        int file_wanted;
 
        dout("get_cap_refs %p need %s want %s\n", inode,
             ceph_cap_string(need), ceph_cap_string(want));
-again:
+
        spin_lock(&ci->i_ceph_lock);
 
        /* make sure file is actually open */
@@ -2138,50 +2136,34 @@ again:
                     inode, ceph_cap_string(have), ceph_cap_string(not),
                     ceph_cap_string(revoking));
                if ((revoking & not) == 0) {
-                       _got = need | (have & want);
-                       __take_cap_refs(ci, _got);
+                       *got = need | (have & want);
+                       __take_cap_refs(ci, *got);
                        ret = 1;
                }
        } else {
+               int session_readonly = false;
+               if ((need & CEPH_CAP_FILE_WR) && ci->i_auth_cap) {
+                       struct ceph_mds_session *s = ci->i_auth_cap->session;
+                       spin_lock(&s->s_cap_lock);
+                       session_readonly = s->s_readonly;
+                       spin_unlock(&s->s_cap_lock);
+               }
+               if (session_readonly) {
+                       dout("get_cap_refs %p needed %s but mds%d readonly\n",
+                            inode, ceph_cap_string(need), ci->i_auth_cap->mds);
+                       *err = -EROFS;
+                       ret = 1;
+                       goto out_unlock;
+               }
+
                dout("get_cap_refs %p have %s needed %s\n", inode,
                     ceph_cap_string(have), ceph_cap_string(need));
        }
 out_unlock:
        spin_unlock(&ci->i_ceph_lock);
 
-       if (ci->i_inline_version != CEPH_INLINE_NONE &&
-           (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
-           i_size_read(inode) > 0) {
-               int ret1;
-               struct page *page = find_get_page(inode->i_mapping, 0);
-               if (page) {
-                       if (PageUptodate(page)) {
-                               *pinned_page = page;
-                               goto out;
-                       }
-                       page_cache_release(page);
-               }
-               /*
-                * drop cap refs first because getattr while holding
-                * caps refs can cause deadlock.
-                */
-               ceph_put_cap_refs(ci, _got);
-               _got = 0;
-
-               /* getattr request will bring inline data into page cache */
-               ret1 = __ceph_do_getattr(inode, NULL,
-                                        CEPH_STAT_CAP_INLINE_DATA, true);
-               if (ret1 >= 0) {
-                       ret = 0;
-                       goto again;
-               }
-               *err = ret1;
-               ret = 1;
-       }
-out:
        dout("get_cap_refs %p ret %d got %s\n", inode,
-            ret, ceph_cap_string(_got));
-       *got = _got;
+            ret, ceph_cap_string(*got));
        return ret;
 }
 
@@ -2221,22 +2203,52 @@ static void check_max_size(struct inode *inode, loff_t endoff)
 int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
                  loff_t endoff, int *got, struct page **pinned_page)
 {
-       int check_max, ret, err;
+       int _got, check_max, ret, err = 0;
 
 retry:
        if (endoff > 0)
                check_max_size(&ci->vfs_inode, endoff);
+       _got = 0;
        check_max = 0;
-       err = 0;
        ret = wait_event_interruptible(ci->i_cap_wq,
-                                      try_get_cap_refs(ci, need, want, endoff,
-                                                       got, pinned_page,
-                                                       &check_max, &err));
+                               try_get_cap_refs(ci, need, want, endoff,
+                                                &_got, &check_max, &err));
        if (err)
                ret = err;
+       if (ret < 0)
+               return ret;
+
        if (check_max)
                goto retry;
-       return ret;
+
+       if (ci->i_inline_version != CEPH_INLINE_NONE &&
+           (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
+           i_size_read(&ci->vfs_inode) > 0) {
+               struct page *page = find_get_page(ci->vfs_inode.i_mapping, 0);
+               if (page) {
+                       if (PageUptodate(page)) {
+                               *pinned_page = page;
+                               goto out;
+                       }
+                       page_cache_release(page);
+               }
+               /*
+                * drop cap refs first because getattr while holding
+                * caps refs can cause deadlock.
+                */
+               ceph_put_cap_refs(ci, _got);
+               _got = 0;
+
+               /* getattr request will bring inline data into page cache */
+               ret = __ceph_do_getattr(&ci->vfs_inode, NULL,
+                                       CEPH_STAT_CAP_INLINE_DATA, true);
+               if (ret < 0)
+                       return ret;
+               goto retry;
+       }
+out:
+       *got = _got;
+       return 0;
 }
 
 /*
@@ -2432,13 +2444,13 @@ static void invalidate_aliases(struct inode *inode)
  */
 static void handle_cap_grant(struct ceph_mds_client *mdsc,
                             struct inode *inode, struct ceph_mds_caps *grant,
-                            void *snaptrace, int snaptrace_len,
                             u64 inline_version,
                             void *inline_data, int inline_len,
                             struct ceph_buffer *xattr_buf,
                             struct ceph_mds_session *session,
                             struct ceph_cap *cap, int issued)
        __releases(ci->i_ceph_lock)
+       __releases(mdsc->snap_rwsem)
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
        int mds = session->s_mds;
@@ -2639,10 +2651,6 @@ static void handle_cap_grant(struct ceph_mds_client *mdsc,
        spin_unlock(&ci->i_ceph_lock);
 
        if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
-               down_write(&mdsc->snap_rwsem);
-               ceph_update_snap_trace(mdsc, snaptrace,
-                                      snaptrace + snaptrace_len, false);
-               downgrade_write(&mdsc->snap_rwsem);
                kick_flushing_inode_caps(mdsc, session, inode);
                up_read(&mdsc->snap_rwsem);
                if (newcaps & ~issued)
@@ -3052,6 +3060,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        struct ceph_cap *cap;
        struct ceph_mds_caps *h;
        struct ceph_mds_cap_peer *peer = NULL;
+       struct ceph_snap_realm *realm;
        int mds = session->s_mds;
        int op, issued;
        u32 seq, mseq;
@@ -3153,11 +3162,23 @@ void ceph_handle_caps(struct ceph_mds_session *session,
                goto done_unlocked;
 
        case CEPH_CAP_OP_IMPORT:
+               realm = NULL;
+               if (snaptrace_len) {
+                       down_write(&mdsc->snap_rwsem);
+                       ceph_update_snap_trace(mdsc, snaptrace,
+                                              snaptrace + snaptrace_len,
+                                              false, &realm);
+                       downgrade_write(&mdsc->snap_rwsem);
+               } else {
+                       down_read(&mdsc->snap_rwsem);
+               }
                handle_cap_import(mdsc, inode, h, peer, session,
                                  &cap, &issued);
-               handle_cap_grant(mdsc, inode, h,  snaptrace, snaptrace_len,
+               handle_cap_grant(mdsc, inode, h,
                                 inline_version, inline_data, inline_len,
                                 msg->middle, session, cap, issued);
+               if (realm)
+                       ceph_put_snap_realm(mdsc, realm);
                goto done_unlocked;
        }
 
@@ -3177,7 +3198,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
        case CEPH_CAP_OP_GRANT:
                __ceph_caps_issued(ci, &issued);
                issued |= __ceph_caps_dirty(ci);
-               handle_cap_grant(mdsc, inode, h, NULL, 0,
+               handle_cap_grant(mdsc, inode, h,
                                 inline_version, inline_data, inline_len,
                                 msg->middle, session, cap, issued);
                goto done_unlocked;
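With the rework above, both the inline-data retry and the read-only-session check live inside ceph_get_caps()/try_get_cap_refs(), so a caller simply takes the cap refs, uses them, and releases them. A hedged sketch of the expected calling pattern follows; the surrounding read path is illustrative and not part of this diff.

	struct page *pinned_page = NULL;
	int got = 0, err;

	err = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
			    -1, &got, &pinned_page);
	if (err < 0)
		return err;

	/* ... perform the read while holding 'got' ... */

	if (pinned_page) {
		page_cache_release(pinned_page);
		pinned_page = NULL;
	}
	ceph_put_cap_refs(ci, got);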
index c241603764fdc560ae72cea19410b49c15e860d2..83e9976f718983ccfb5d95c56893e81c7d01caa9 100644 (file)
@@ -26,8 +26,6 @@
  * point by name.
  */
 
-const struct inode_operations ceph_dir_iops;
-const struct file_operations ceph_dir_fops;
 const struct dentry_operations ceph_dentry_ops;
 
 /*
@@ -672,13 +670,17 @@ int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
                /*
                 * We created the item, then did a lookup, and found
                 * it was already linked to another inode we already
-                * had in our cache (and thus got spliced).  Link our
-                * dentry to that inode, but don't hash it, just in
-                * case the VFS wants to dereference it.
+                * had in our cache (and thus got spliced). To avoid
+                * confusing the VFS (especially when the inode is a
+                * directory), we don't link our dentry to that inode;
+                * we return an error instead.
+                *
+                * This should be rare, and it only happens when we talk
+                * to an old MDS. A recent MDS does not send a traceless
+                * reply for a request that creates a new inode.
                 */
-               BUG_ON(!result->d_inode);
-               d_instantiate(dentry, result->d_inode);
-               return 0;
+               d_drop(result);
+               return -ESTALE;
        }
        return PTR_ERR(result);
 }
@@ -902,7 +904,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
        } else if (ceph_snap(dir) == CEPH_NOSNAP) {
                dout("unlink/rmdir dir %p dn %p inode %p\n",
                     dir, dentry, inode);
-               op = S_ISDIR(dentry->d_inode->i_mode) ?
+               op = d_is_dir(dentry) ?
                        CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
        } else
                goto out;
@@ -1335,6 +1337,13 @@ const struct file_operations ceph_dir_fops = {
        .fsync = ceph_dir_fsync,
 };
 
+const struct file_operations ceph_snapdir_fops = {
+       .iterate = ceph_readdir,
+       .llseek = ceph_dir_llseek,
+       .open = ceph_open,
+       .release = ceph_release,
+};
+
 const struct inode_operations ceph_dir_iops = {
        .lookup = ceph_lookup,
        .permission = ceph_permission,
@@ -1357,6 +1366,14 @@ const struct inode_operations ceph_dir_iops = {
        .atomic_open = ceph_atomic_open,
 };
 
+const struct inode_operations ceph_snapdir_iops = {
+       .lookup = ceph_lookup,
+       .permission = ceph_permission,
+       .getattr = ceph_getattr,
+       .mkdir = ceph_mkdir,
+       .rmdir = ceph_unlink,
+};
+
 const struct dentry_operations ceph_dentry_ops = {
        .d_revalidate = ceph_d_revalidate,
        .d_release = ceph_d_release,
index 905986dd4c3c9dabaf5bf3ea5e584e0ecc719a5b..d533075a823d5eb92e709547b8fe790c59cba981 100644 (file)
@@ -275,10 +275,10 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
        err = ceph_mdsc_do_request(mdsc,
                                   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
                                   req);
+       err = ceph_handle_snapdir(req, dentry, err);
        if (err)
                goto out_req;
 
-       err = ceph_handle_snapdir(req, dentry, err);
        if (err == 0 && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
                err = ceph_handle_notrace_create(dir, dentry);
 
@@ -292,7 +292,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
        }
        if (err)
                goto out_req;
-       if (dn || dentry->d_inode == NULL || S_ISLNK(dentry->d_inode->i_mode)) {
+       if (dn || dentry->d_inode == NULL || d_is_symlink(dentry)) {
                /* make vfs retry on splice, ENOENT, or symlink */
                dout("atomic_open finish_no_open on dn %p\n", dn);
                err = finish_no_open(file, dn);
@@ -392,13 +392,14 @@ more:
        if (ret >= 0) {
                int didpages;
                if (was_short && (pos + ret < inode->i_size)) {
-                       u64 tmp = min(this_len - ret,
-                                       inode->i_size - pos - ret);
+                       int zlen = min(this_len - ret,
+                                      inode->i_size - pos - ret);
+                       int zoff = (o_direct ? buf_align : io_align) +
+                                   read + ret;
                        dout(" zero gap %llu to %llu\n",
-                               pos + ret, pos + ret + tmp);
-                       ceph_zero_page_vector_range(page_align + read + ret,
-                                                       tmp, pages);
-                       ret += tmp;
+                               pos + ret, pos + ret + zlen);
+                       ceph_zero_page_vector_range(zoff, zlen, pages);
+                       ret += zlen;
                }
 
                didpages = (page_align + ret) >> PAGE_CACHE_SHIFT;
@@ -878,28 +879,34 @@ again:
 
                i_size = i_size_read(inode);
                if (retry_op == READ_INLINE) {
-                       /* does not support inline data > PAGE_SIZE */
-                       if (i_size > PAGE_CACHE_SIZE) {
-                               ret = -EIO;
-                       } else if (iocb->ki_pos < i_size) {
+                       BUG_ON(ret > 0 || read > 0);
+                       if (iocb->ki_pos < i_size &&
+                           iocb->ki_pos < PAGE_CACHE_SIZE) {
                                loff_t end = min_t(loff_t, i_size,
                                                   iocb->ki_pos + len);
+                               end = min_t(loff_t, end, PAGE_CACHE_SIZE);
                                if (statret < end)
                                        zero_user_segment(page, statret, end);
                                ret = copy_page_to_iter(page,
                                                iocb->ki_pos & ~PAGE_MASK,
                                                end - iocb->ki_pos, to);
                                iocb->ki_pos += ret;
-                       } else {
-                               ret = 0;
+                               read += ret;
+                       }
+                       if (iocb->ki_pos < i_size && read < len) {
+                               size_t zlen = min_t(size_t, len - read,
+                                                   i_size - iocb->ki_pos);
+                               ret = iov_iter_zero(zlen, to);
+                               iocb->ki_pos += ret;
+                               read += ret;
                        }
                        __free_pages(page, 0);
-                       return ret;
+                       return read;
                }
 
                /* hit EOF or hole? */
                if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
-                       ret < len) {
+                   ret < len) {
                        dout("sync_read hit hole, ppos %lld < size %lld"
                             ", reading more\n", iocb->ki_pos,
                             inode->i_size);
index 6b51736051541ea54b6956cf6dce57576f8ffe6f..119c43c80638788f648272fbd1593e6e6bdbfd94 100644 (file)
@@ -82,8 +82,8 @@ struct inode *ceph_get_snapdir(struct inode *parent)
        inode->i_mode = parent->i_mode;
        inode->i_uid = parent->i_uid;
        inode->i_gid = parent->i_gid;
-       inode->i_op = &ceph_dir_iops;
-       inode->i_fop = &ceph_dir_fops;
+       inode->i_op = &ceph_snapdir_iops;
+       inode->i_fop = &ceph_snapdir_fops;
        ci->i_snap_caps = CEPH_CAP_PIN; /* so we can open */
        ci->i_rbytes = 0;
        return inode;
@@ -838,30 +838,31 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
                       ceph_vinop(inode), inode->i_mode);
        }
 
-       /* set dir completion flag? */
-       if (S_ISDIR(inode->i_mode) &&
-           ci->i_files == 0 && ci->i_subdirs == 0 &&
-           ceph_snap(inode) == CEPH_NOSNAP &&
-           (le32_to_cpu(info->cap.caps) & CEPH_CAP_FILE_SHARED) &&
-           (issued & CEPH_CAP_FILE_EXCL) == 0 &&
-           !__ceph_dir_is_complete(ci)) {
-               dout(" marking %p complete (empty)\n", inode);
-               __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count),
-                                       ci->i_ordered_count);
-       }
-
        /* were we issued a capability? */
        if (info->cap.caps) {
                if (ceph_snap(inode) == CEPH_NOSNAP) {
+                       unsigned caps = le32_to_cpu(info->cap.caps);
                        ceph_add_cap(inode, session,
                                     le64_to_cpu(info->cap.cap_id),
-                                    cap_fmode,
-                                    le32_to_cpu(info->cap.caps),
+                                    cap_fmode, caps,
                                     le32_to_cpu(info->cap.wanted),
                                     le32_to_cpu(info->cap.seq),
                                     le32_to_cpu(info->cap.mseq),
                                     le64_to_cpu(info->cap.realm),
                                     info->cap.flags, &new_cap);
+
+                       /* set dir completion flag? */
+                       if (S_ISDIR(inode->i_mode) &&
+                           ci->i_files == 0 && ci->i_subdirs == 0 &&
+                           (caps & CEPH_CAP_FILE_SHARED) &&
+                           (issued & CEPH_CAP_FILE_EXCL) == 0 &&
+                           !__ceph_dir_is_complete(ci)) {
+                               dout(" marking %p complete (empty)\n", inode);
+                               __ceph_dir_set_complete(ci,
+                                       atomic_read(&ci->i_release_count),
+                                       ci->i_ordered_count);
+                       }
+
                        wake = true;
                } else {
                        dout(" %p got snap_caps %s\n", inode,
@@ -1446,12 +1447,14 @@ retry_lookup:
                }
 
                if (!dn->d_inode) {
-                       dn = splice_dentry(dn, in, NULL);
-                       if (IS_ERR(dn)) {
-                               err = PTR_ERR(dn);
+                       struct dentry *realdn = splice_dentry(dn, in, NULL);
+                       if (IS_ERR(realdn)) {
+                               err = PTR_ERR(realdn);
+                               d_drop(dn);
                                dn = NULL;
                                goto next_item;
                        }
+                       dn = realdn;
                }
 
                di = dn->d_fsdata;
index 5f62fb7a5d0ae9b3338c00b724691a096c7a621b..71c073f38e547522c81fa335fcae85c5d8c7ef58 100644 (file)
@@ -480,6 +480,7 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
                mdsc->max_sessions = newmax;
        }
        mdsc->sessions[mds] = s;
+       atomic_inc(&mdsc->num_sessions);
        atomic_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
 
        ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
@@ -503,6 +504,7 @@ static void __unregister_session(struct ceph_mds_client *mdsc,
        mdsc->sessions[s->s_mds] = NULL;
        ceph_con_close(&s->s_con);
        ceph_put_mds_session(s);
+       atomic_dec(&mdsc->num_sessions);
 }
 
 /*
@@ -842,8 +844,9 @@ static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u6
        struct ceph_options *opt = mdsc->fsc->client->options;
        void *p;
 
-       const char* metadata[3][2] = {
+       const char* metadata[][2] = {
                {"hostname", utsname()->nodename},
+               {"kernel_version", utsname()->release},
                {"entity_id", opt->name ? opt->name : ""},
                {NULL, NULL}
        };
@@ -1464,19 +1467,33 @@ out_unlocked:
        return err;
 }
 
+static int check_cap_flush(struct inode *inode, u64 want_flush_seq)
+{
+       struct ceph_inode_info *ci = ceph_inode(inode);
+       int ret;
+       spin_lock(&ci->i_ceph_lock);
+       if (ci->i_flushing_caps)
+               ret = ci->i_cap_flush_seq >= want_flush_seq;
+       else
+               ret = 1;
+       spin_unlock(&ci->i_ceph_lock);
+       return ret;
+}
+
 /*
  * flush all dirty inode data to disk.
  *
  * returns true if we've flushed through want_flush_seq
  */
-static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
+static void wait_caps_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
 {
-       int mds, ret = 1;
+       int mds;
 
        dout("check_cap_flush want %lld\n", want_flush_seq);
        mutex_lock(&mdsc->mutex);
-       for (mds = 0; ret && mds < mdsc->max_sessions; mds++) {
+       for (mds = 0; mds < mdsc->max_sessions; mds++) {
                struct ceph_mds_session *session = mdsc->sessions[mds];
+               struct inode *inode = NULL;
 
                if (!session)
                        continue;
@@ -1489,29 +1506,29 @@ static int check_cap_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq)
                                list_entry(session->s_cap_flushing.next,
                                           struct ceph_inode_info,
                                           i_flushing_item);
-                       struct inode *inode = &ci->vfs_inode;
 
-                       spin_lock(&ci->i_ceph_lock);
-                       if (ci->i_cap_flush_seq <= want_flush_seq) {
+                       if (!check_cap_flush(&ci->vfs_inode, want_flush_seq)) {
                                dout("check_cap_flush still flushing %p "
-                                    "seq %lld <= %lld to mds%d\n", inode,
-                                    ci->i_cap_flush_seq, want_flush_seq,
-                                    session->s_mds);
-                               ret = 0;
+                                    "seq %lld <= %lld to mds%d\n",
+                                    &ci->vfs_inode, ci->i_cap_flush_seq,
+                                    want_flush_seq, session->s_mds);
+                               inode = igrab(&ci->vfs_inode);
                        }
-                       spin_unlock(&ci->i_ceph_lock);
                }
                mutex_unlock(&session->s_mutex);
                ceph_put_mds_session(session);
 
-               if (!ret)
-                       return ret;
+               if (inode) {
+                       wait_event(mdsc->cap_flushing_wq,
+                                  check_cap_flush(inode, want_flush_seq));
+                       iput(inode);
+               }
+
                mutex_lock(&mdsc->mutex);
        }
 
        mutex_unlock(&mdsc->mutex);
        dout("check_cap_flush ok, flushed thru %lld\n", want_flush_seq);
-       return ret;
 }
 
 /*
@@ -1923,7 +1940,11 @@ static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
        head->num_releases = cpu_to_le16(releases);
 
        /* time stamp */
-       ceph_encode_copy(&p, &req->r_stamp, sizeof(req->r_stamp));
+       {
+               struct ceph_timespec ts;
+               ceph_encode_timespec(&ts, &req->r_stamp);
+               ceph_encode_copy(&p, &ts, sizeof(ts));
+       }
 
        BUG_ON(p > end);
        msg->front.iov_len = p - msg->front.iov_base;
@@ -2012,7 +2033,11 @@ static int __prepare_send_request(struct ceph_mds_client *mdsc,
 
                /* time stamp */
                p = msg->front.iov_base + req->r_request_release_offset;
-               ceph_encode_copy(&p, &req->r_stamp, sizeof(req->r_stamp));
+               {
+                       struct ceph_timespec ts;
+                       ceph_encode_timespec(&ts, &req->r_stamp);
+                       ceph_encode_copy(&p, &ts, sizeof(ts));
+               }
 
                msg->front.iov_len = p - msg->front.iov_base;
                msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
@@ -2159,6 +2184,8 @@ static void kick_requests(struct ceph_mds_client *mdsc, int mds)
                p = rb_next(p);
                if (req->r_got_unsafe)
                        continue;
+               if (req->r_attempts > 0)
+                       continue; /* only new requests */
                if (req->r_session &&
                    req->r_session->s_mds == mds) {
                        dout(" kicking tid %llu\n", req->r_tid);
@@ -2286,6 +2313,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        struct ceph_mds_request *req;
        struct ceph_mds_reply_head *head = msg->front.iov_base;
        struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
+       struct ceph_snap_realm *realm;
        u64 tid;
        int err, result;
        int mds = session->s_mds;
@@ -2401,11 +2429,13 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        }
 
        /* snap trace */
+       realm = NULL;
        if (rinfo->snapblob_len) {
                down_write(&mdsc->snap_rwsem);
                ceph_update_snap_trace(mdsc, rinfo->snapblob,
-                              rinfo->snapblob + rinfo->snapblob_len,
-                              le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP);
+                               rinfo->snapblob + rinfo->snapblob_len,
+                               le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
+                               &realm);
                downgrade_write(&mdsc->snap_rwsem);
        } else {
                down_read(&mdsc->snap_rwsem);
@@ -2423,6 +2453,8 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        mutex_unlock(&req->r_fill_mutex);
 
        up_read(&mdsc->snap_rwsem);
+       if (realm)
+               ceph_put_snap_realm(mdsc, realm);
 out_err:
        mutex_lock(&mdsc->mutex);
        if (!req->r_aborted) {
@@ -2487,6 +2519,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
                dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
                BUG_ON(req->r_err);
                BUG_ON(req->r_got_result);
+               req->r_attempts = 0;
                req->r_num_fwd = fwd_seq;
                req->r_resend_mds = next_mds;
                put_request_session(req);
@@ -2580,6 +2613,14 @@ static void handle_session(struct ceph_mds_session *session,
                send_flushmsg_ack(mdsc, session, seq);
                break;
 
+       case CEPH_SESSION_FORCE_RO:
+               dout("force_session_readonly %p\n", session);
+               spin_lock(&session->s_cap_lock);
+               session->s_readonly = true;
+               spin_unlock(&session->s_cap_lock);
+               wake_up_session_caps(session, 0);
+               break;
+
        default:
                pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
                WARN_ON(1);
@@ -2610,6 +2651,7 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
                                   struct ceph_mds_session *session)
 {
        struct ceph_mds_request *req, *nreq;
+       struct rb_node *p;
        int err;
 
        dout("replay_unsafe_requests mds%d\n", session->s_mds);
@@ -2622,6 +2664,28 @@ static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
                        ceph_con_send(&session->s_con, req->r_request);
                }
        }
+
+       /*
+        * Also re-send old requests when the MDS enters the reconnect stage,
+        * so that the MDS can process completed requests in the clientreplay stage.
+        */
+       p = rb_first(&mdsc->request_tree);
+       while (p) {
+               req = rb_entry(p, struct ceph_mds_request, r_node);
+               p = rb_next(p);
+               if (req->r_got_unsafe)
+                       continue;
+               if (req->r_attempts == 0)
+                       continue; /* only old requests */
+               if (req->r_session &&
+                   req->r_session->s_mds == session->s_mds) {
+                       err = __prepare_send_request(mdsc, req, session->s_mds);
+                       if (!err) {
+                               ceph_msg_get(req->r_request);
+                               ceph_con_send(&session->s_con, req->r_request);
+                       }
+               }
+       }
        mutex_unlock(&mdsc->mutex);
 }
 
@@ -2787,6 +2851,8 @@ static void send_mds_reconnect(struct ceph_mds_client *mdsc,
        spin_unlock(&session->s_gen_ttl_lock);
 
        spin_lock(&session->s_cap_lock);
+       /* don't know if session is readonly */
+       session->s_readonly = 0;
        /*
         * notify __ceph_remove_cap() that we are composing cap reconnect.
         * If a cap get released before being added to the cap reconnect,
@@ -2933,9 +2999,6 @@ static void check_new_map(struct ceph_mds_client *mdsc,
                                mutex_unlock(&s->s_mutex);
                                s->s_state = CEPH_MDS_SESSION_RESTARTING;
                        }
-
-                       /* kick any requests waiting on the recovering mds */
-                       kick_requests(mdsc, i);
                } else if (oldstate == newstate) {
                        continue;  /* nothing new with this mds */
                }
@@ -3295,6 +3358,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
        init_waitqueue_head(&mdsc->session_close_wq);
        INIT_LIST_HEAD(&mdsc->waiting_for_map);
        mdsc->sessions = NULL;
+       atomic_set(&mdsc->num_sessions, 0);
        mdsc->max_sessions = 0;
        mdsc->stopping = 0;
        init_rwsem(&mdsc->snap_rwsem);
@@ -3428,14 +3492,17 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
        dout("sync\n");
        mutex_lock(&mdsc->mutex);
        want_tid = mdsc->last_tid;
-       want_flush = mdsc->cap_flush_seq;
        mutex_unlock(&mdsc->mutex);
-       dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
 
        ceph_flush_dirty_caps(mdsc);
+       spin_lock(&mdsc->cap_dirty_lock);
+       want_flush = mdsc->cap_flush_seq;
+       spin_unlock(&mdsc->cap_dirty_lock);
+
+       dout("sync want tid %lld flush_seq %lld\n", want_tid, want_flush);
 
        wait_unsafe_requests(mdsc, want_tid);
-       wait_event(mdsc->cap_flushing_wq, check_cap_flush(mdsc, want_flush));
+       wait_caps_flush(mdsc, want_flush);
 }
 
 /*
@@ -3443,17 +3510,9 @@ void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
  */
 static bool done_closing_sessions(struct ceph_mds_client *mdsc)
 {
-       int i, n = 0;
-
        if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN)
                return true;
-
-       mutex_lock(&mdsc->mutex);
-       for (i = 0; i < mdsc->max_sessions; i++)
-               if (mdsc->sessions[i])
-                       n++;
-       mutex_unlock(&mdsc->mutex);
-       return n == 0;
+       return atomic_read(&mdsc->num_sessions) == 0;
 }
 
 /*
index e2817d00f7d9f51922b2a1c04b20de97e410edfe..1875b5d985c6b0df2fbb38e16f39a78ecc76750d 100644 (file)
@@ -137,6 +137,7 @@ struct ceph_mds_session {
        int               s_nr_caps, s_trim_caps;
        int               s_num_cap_releases;
        int               s_cap_reconnect;
+       int               s_readonly;
        struct list_head  s_cap_releases; /* waiting cap_release messages */
        struct list_head  s_cap_releases_done; /* ready to send */
        struct ceph_cap  *s_cap_iterator;
@@ -272,6 +273,7 @@ struct ceph_mds_client {
        struct list_head        waiting_for_map;
 
        struct ceph_mds_session **sessions;    /* NULL for mds if no session */
+       atomic_t                num_sessions;
        int                     max_sessions;  /* len of s_mds_sessions */
        int                     stopping;      /* true if shutting down */
 
index ce35fbd4ba5d3fef3f5a05abc314df300fa4ff49..a97e39f09ba683349bb5f97e44f0d229b3a88936 100644 (file)
@@ -70,13 +70,11 @@ void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
         * safe.  we do need to protect against concurrent empty list
         * additions, however.
         */
-       if (atomic_read(&realm->nref) == 0) {
+       if (atomic_inc_return(&realm->nref) == 1) {
                spin_lock(&mdsc->snap_empty_lock);
                list_del_init(&realm->empty_item);
                spin_unlock(&mdsc->snap_empty_lock);
        }
-
-       atomic_inc(&realm->nref);
 }
 
 static void __insert_snap_realm(struct rb_root *root,
@@ -116,7 +114,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
        if (!realm)
                return ERR_PTR(-ENOMEM);
 
-       atomic_set(&realm->nref, 0);    /* tree does not take a ref */
+       atomic_set(&realm->nref, 1);    /* for caller */
        realm->ino = ino;
        INIT_LIST_HEAD(&realm->children);
        INIT_LIST_HEAD(&realm->child_item);
@@ -134,8 +132,8 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
  *
  * caller must hold snap_rwsem for write.
  */
-struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
-                                              u64 ino)
+static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
+                                                  u64 ino)
 {
        struct rb_node *n = mdsc->snap_realms.rb_node;
        struct ceph_snap_realm *r;
@@ -154,6 +152,16 @@ struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
        return NULL;
 }
 
+struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
+                                              u64 ino)
+{
+       struct ceph_snap_realm *r;
+       r = __lookup_snap_realm(mdsc, ino);
+       if (r)
+               ceph_get_snap_realm(mdsc, r);
+       return r;
+}
+
 static void __put_snap_realm(struct ceph_mds_client *mdsc,
                             struct ceph_snap_realm *realm);
 
@@ -273,7 +281,6 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
        }
        realm->parent_ino = parentino;
        realm->parent = parent;
-       ceph_get_snap_realm(mdsc, parent);
        list_add(&realm->child_item, &parent->children);
        return 1;
 }
@@ -631,12 +638,14 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
  * Caller must hold snap_rwsem for write.
  */
 int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
-                          void *p, void *e, bool deletion)
+                          void *p, void *e, bool deletion,
+                          struct ceph_snap_realm **realm_ret)
 {
        struct ceph_mds_snap_realm *ri;    /* encoded */
        __le64 *snaps;                     /* encoded */
        __le64 *prior_parent_snaps;        /* encoded */
-       struct ceph_snap_realm *realm;
+       struct ceph_snap_realm *realm = NULL;
+       struct ceph_snap_realm *first_realm = NULL;
        int invalidate = 0;
        int err = -ENOMEM;
        LIST_HEAD(dirty_realms);
@@ -704,13 +713,18 @@ more:
        dout("done with %llx %p, invalidated=%d, %p %p\n", realm->ino,
             realm, invalidate, p, e);
 
-       if (p < e)
-               goto more;
-
        /* invalidate when we reach the _end_ (root) of the trace */
-       if (invalidate)
+       if (invalidate && p >= e)
                rebuild_snap_realms(realm);
 
+       if (!first_realm)
+               first_realm = realm;
+       else
+               ceph_put_snap_realm(mdsc, realm);
+
+       if (p < e)
+               goto more;
+
        /*
         * queue cap snaps _after_ we've built the new snap contexts,
         * so that i_head_snapc can be set appropriately.
@@ -721,12 +735,21 @@ more:
                queue_realm_cap_snaps(realm);
        }
 
+       if (realm_ret)
+               *realm_ret = first_realm;
+       else
+               ceph_put_snap_realm(mdsc, first_realm);
+
        __cleanup_empty_realms(mdsc);
        return 0;
 
 bad:
        err = -EINVAL;
 fail:
+       if (realm && !IS_ERR(realm))
+               ceph_put_snap_realm(mdsc, realm);
+       if (first_realm)
+               ceph_put_snap_realm(mdsc, first_realm);
        pr_err("update_snap_trace error %d\n", err);
        return err;
 }
@@ -844,7 +867,6 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
                        if (IS_ERR(realm))
                                goto out;
                }
-               ceph_get_snap_realm(mdsc, realm);
 
                dout("splitting snap_realm %llx %p\n", realm->ino, realm);
                for (i = 0; i < num_split_inos; i++) {
@@ -905,7 +927,7 @@ skip_inode:
                /* we may have taken some of the old realm's children. */
                for (i = 0; i < num_split_realms; i++) {
                        struct ceph_snap_realm *child =
-                               ceph_lookup_snap_realm(mdsc,
+                               __lookup_snap_realm(mdsc,
                                           le64_to_cpu(split_realms[i]));
                        if (!child)
                                continue;
@@ -918,7 +940,7 @@ skip_inode:
         * snap, we can avoid queueing cap_snaps.
         */
        ceph_update_snap_trace(mdsc, p, e,
-                              op == CEPH_SNAP_OP_DESTROY);
+                              op == CEPH_SNAP_OP_DESTROY, NULL);
 
        if (op == CEPH_SNAP_OP_SPLIT)
                /* we took a reference when we created the realm, above */
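The snap-realm hunks above fold the 0 -> 1 "revival" check into the increment itself, so a realm is pulled off the empty list exactly when its refcount first becomes non-zero. A minimal sketch of that pattern, using hypothetical obj/obj_cache types rather than the ceph structures:

/*
 * Sketch only: take a reference and, if this was the 0 -> 1 transition,
 * remove the object from the cache's "unreferenced" list.  Testing the
 * value returned by atomic_inc_return() closes the window that existed
 * between a separate atomic_read() and atomic_inc().
 */
static void obj_get(struct obj_cache *cache, struct obj *o)
{
	if (atomic_inc_return(&o->nref) == 1) {
		spin_lock(&cache->empty_lock);
		list_del_init(&o->empty_item);
		spin_unlock(&cache->empty_lock);
	}
}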
index 5ae62587a71d5c7e4e413e1252572707433aaf7b..a63997b8bcff8ac6d5967361700d79f1b6e35c3f 100644 (file)
@@ -414,6 +414,10 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
                seq_puts(m, ",noshare");
        if (opt->flags & CEPH_OPT_NOCRC)
                seq_puts(m, ",nocrc");
+       if (opt->flags & CEPH_OPT_NOMSGAUTH)
+               seq_puts(m, ",nocephx_require_signatures");
+       if ((opt->flags & CEPH_OPT_TCP_NODELAY) == 0)
+               seq_puts(m, ",notcp_nodelay");
 
        if (opt->name)
                seq_printf(m, ",name=%s", opt->name);
index e1aa32d0759d12c3709fb3d66c03982756d55599..04c8124ed30ecae65bf7344c96f7c984ed5a1d03 100644 (file)
@@ -693,7 +693,8 @@ extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc,
 extern void ceph_put_snap_realm(struct ceph_mds_client *mdsc,
                                struct ceph_snap_realm *realm);
 extern int ceph_update_snap_trace(struct ceph_mds_client *m,
-                                 void *p, void *e, bool deletion);
+                                 void *p, void *e, bool deletion,
+                                 struct ceph_snap_realm **realm_ret);
 extern void ceph_handle_snap(struct ceph_mds_client *mdsc,
                             struct ceph_mds_session *session,
                             struct ceph_msg *msg);
@@ -892,7 +893,9 @@ extern void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
 int ceph_uninline_data(struct file *filp, struct page *locked_page);
 /* dir.c */
 extern const struct file_operations ceph_dir_fops;
+extern const struct file_operations ceph_snapdir_fops;
 extern const struct inode_operations ceph_dir_iops;
+extern const struct inode_operations ceph_snapdir_iops;
 extern const struct dentry_operations ceph_dentry_ops, ceph_snap_dentry_ops,
        ceph_snapdir_dentry_ops;
 
index 281ee011bb6a936125cdcfdde6d8122e66447b15..60cb88c1dd2bf88bb12f3e5451e33ed51d5ac1ff 100644 (file)
@@ -304,7 +304,7 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry,
                             (const char *) old_name, (const char *)new_name);
        if (!error) {
                if (new_dentry->d_inode) {
-                       if (S_ISDIR(new_dentry->d_inode->i_mode)) {
+                       if (d_is_dir(new_dentry)) {
                                coda_dir_drop_nlink(old_dir);
                                coda_dir_inc_nlink(new_dir);
                        }
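This is the first of many hunks below (ecryptfs, exportfs, gfs2, hfsplus, hppfs, jffs2, libfs, namei, namespace, fuse, ...) that switch from testing d_inode->i_mode to the d_is_dir()/d_is_reg()/d_is_symlink() helpers, which read the cached type bits in dentry->d_flags instead of dereferencing the inode. A simplified sketch of the directory helper (illustrative, not the exact include/linux/dcache.h definition):

static inline bool d_is_dir_sketch(const struct dentry *dentry)
{
	unsigned type = dentry->d_flags & DCACHE_ENTRY_TYPE;

	/* plain directories and automount points both count as directories */
	return type == DCACHE_DIRECTORY_TYPE || type == DCACHE_AUTODIR_TYPE;
}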
index a315677e44d34d8501b1b03629da0201283f5b1f..b65d1ef532d52d692cc7ebbdc3c7350d1dd9d735 100644 (file)
@@ -69,14 +69,13 @@ extern struct kmem_cache *configfs_dir_cachep;
 extern int configfs_is_root(struct config_item *item);
 
 extern struct inode * configfs_new_inode(umode_t mode, struct configfs_dirent *, struct super_block *);
-extern int configfs_create(struct dentry *, umode_t mode, int (*init)(struct inode *));
+extern int configfs_create(struct dentry *, umode_t mode, void (*init)(struct inode *));
 
 extern int configfs_create_file(struct config_item *, const struct configfs_attribute *);
 extern int configfs_make_dirent(struct configfs_dirent *,
                                struct dentry *, void *, umode_t, int);
 extern int configfs_dirent_is_ready(struct configfs_dirent *);
 
-extern int configfs_add_file(struct dentry *, const struct configfs_attribute *, int);
 extern void configfs_hash_and_remove(struct dentry * dir, const char * name);
 
 extern const unsigned char * configfs_get_name(struct configfs_dirent *sd);
index c9c298bd3058924b8fed471ccdebabeef2feeb88..cf0db005d2f58ab2ed2b42217777f643e6d0d235 100644 (file)
@@ -240,60 +240,26 @@ int configfs_make_dirent(struct configfs_dirent * parent_sd,
        return 0;
 }
 
-static int init_dir(struct inode * inode)
+static void init_dir(struct inode * inode)
 {
        inode->i_op = &configfs_dir_inode_operations;
        inode->i_fop = &configfs_dir_operations;
 
        /* directory inodes start off with i_nlink == 2 (for "." entry) */
        inc_nlink(inode);
-       return 0;
 }
 
-static int configfs_init_file(struct inode * inode)
+static void configfs_init_file(struct inode * inode)
 {
        inode->i_size = PAGE_SIZE;
        inode->i_fop = &configfs_file_operations;
-       return 0;
 }
 
-static int init_symlink(struct inode * inode)
+static void init_symlink(struct inode * inode)
 {
        inode->i_op = &configfs_symlink_inode_operations;
-       return 0;
-}
-
-static int create_dir(struct config_item *k, struct dentry *d)
-{
-       int error;
-       umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
-       struct dentry *p = d->d_parent;
-
-       BUG_ON(!k);
-
-       error = configfs_dirent_exists(p->d_fsdata, d->d_name.name);
-       if (!error)
-               error = configfs_make_dirent(p->d_fsdata, d, k, mode,
-                                            CONFIGFS_DIR | CONFIGFS_USET_CREATING);
-       if (!error) {
-               configfs_set_dir_dirent_depth(p->d_fsdata, d->d_fsdata);
-               error = configfs_create(d, mode, init_dir);
-               if (!error) {
-                       inc_nlink(p->d_inode);
-               } else {
-                       struct configfs_dirent *sd = d->d_fsdata;
-                       if (sd) {
-                               spin_lock(&configfs_dirent_lock);
-                               list_del_init(&sd->s_sibling);
-                               spin_unlock(&configfs_dirent_lock);
-                               configfs_put(sd);
-                       }
-               }
-       }
-       return error;
 }
 
-
 /**
  *     configfs_create_dir - create a directory for a config_item.
  *     @item:          config_item we're creating directory for.
@@ -303,11 +269,37 @@ static int create_dir(struct config_item *k, struct dentry *d)
  *     until it is validated by configfs_dir_set_ready()
  */
 
-static int configfs_create_dir(struct config_item * item, struct dentry *dentry)
+static int configfs_create_dir(struct config_item *item, struct dentry *dentry)
 {
-       int error = create_dir(item, dentry);
-       if (!error)
+       int error;
+       umode_t mode = S_IFDIR| S_IRWXU | S_IRUGO | S_IXUGO;
+       struct dentry *p = dentry->d_parent;
+
+       BUG_ON(!item);
+
+       error = configfs_dirent_exists(p->d_fsdata, dentry->d_name.name);
+       if (unlikely(error))
+               return error;
+
+       error = configfs_make_dirent(p->d_fsdata, dentry, item, mode,
+                                    CONFIGFS_DIR | CONFIGFS_USET_CREATING);
+       if (unlikely(error))
+               return error;
+
+       configfs_set_dir_dirent_depth(p->d_fsdata, dentry->d_fsdata);
+       error = configfs_create(dentry, mode, init_dir);
+       if (!error) {
+               inc_nlink(p->d_inode);
                item->ci_dentry = dentry;
+       } else {
+               struct configfs_dirent *sd = dentry->d_fsdata;
+               if (sd) {
+                       spin_lock(&configfs_dirent_lock);
+                       list_del_init(&sd->s_sibling);
+                       spin_unlock(&configfs_dirent_lock);
+                       configfs_put(sd);
+               }
+       }
        return error;
 }
 
index 1d1c41f1014d9039c0a3ec217d6c6e53856760b1..56d2cdc9ae0a7213f91bb5e332b1513fda887b79 100644 (file)
@@ -313,21 +313,6 @@ const struct file_operations configfs_file_operations = {
        .release        = configfs_release,
 };
 
-
-int configfs_add_file(struct dentry * dir, const struct configfs_attribute * attr, int type)
-{
-       struct configfs_dirent * parent_sd = dir->d_fsdata;
-       umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
-       int error = 0;
-
-       mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL);
-       error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode, type);
-       mutex_unlock(&dir->d_inode->i_mutex);
-
-       return error;
-}
-
-
 /**
  *     configfs_create_file - create an attribute file for an item.
  *     @item:  item we're creating for.
@@ -336,9 +321,16 @@ int configfs_add_file(struct dentry * dir, const struct configfs_attribute * att
 
 int configfs_create_file(struct config_item * item, const struct configfs_attribute * attr)
 {
-       BUG_ON(!item || !item->ci_dentry || !attr);
+       struct dentry *dir = item->ci_dentry;
+       struct configfs_dirent *parent_sd = dir->d_fsdata;
+       umode_t mode = (attr->ca_mode & S_IALLUGO) | S_IFREG;
+       int error = 0;
 
-       return configfs_add_file(item->ci_dentry, attr,
-                                CONFIGFS_ITEM_ATTR);
+       mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_NORMAL);
+       error = configfs_make_dirent(parent_sd, NULL, (void *) attr, mode,
+                                    CONFIGFS_ITEM_ATTR);
+       mutex_unlock(&dir->d_inode->i_mutex);
+
+       return error;
 }
 
index 65af861471541924525bcd9c04ab32a931296ba8..5423a6a6ecc8350c0284d307ac47cf6afad9238a 100644 (file)
@@ -176,7 +176,7 @@ static void configfs_set_inode_lock_class(struct configfs_dirent *sd,
 
 #endif /* CONFIG_LOCKDEP */
 
-int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct inode *))
+int configfs_create(struct dentry * dentry, umode_t mode, void (*init)(struct inode *))
 {
        int error = 0;
        struct inode *inode = NULL;
@@ -198,13 +198,7 @@ int configfs_create(struct dentry * dentry, umode_t mode, int (*init)(struct ino
        p_inode->i_mtime = p_inode->i_ctime = CURRENT_TIME;
        configfs_set_inode_lock_class(sd, inode);
 
-       if (init) {
-               error = init(inode);
-               if (error) {
-                       iput(inode);
-                       return error;
-               }
-       }
+       init(inode);
        d_instantiate(dentry, inode);
        if (S_ISDIR(mode) || S_ISLNK(mode))
                dget(dentry);  /* pin link and directory dentries in core */
@@ -242,7 +236,7 @@ void configfs_drop_dentry(struct configfs_dirent * sd, struct dentry * parent)
 
        if (dentry) {
                spin_lock(&dentry->d_lock);
-               if (!(d_unhashed(dentry) && dentry->d_inode)) {
+               if (!d_unhashed(dentry) && dentry->d_inode) {
                        dget_dlock(dentry);
                        __d_drop(dentry);
                        spin_unlock(&dentry->d_lock);
index b5c86ffd5033420523934c7153080d8cdc605bea..f319926ddf8cbc5cc90a003628bf500701ce3c25 100644 (file)
@@ -572,7 +572,7 @@ void do_coredump(const siginfo_t *siginfo)
                         *
                         * Normally core limits are irrelevant to pipes, since
                         * we're not writing to the file system, but we use
-                        * cprm.limit of 1 here as a speacial value, this is a
+                        * cprm.limit of 1 here as a special value, this is a
                         * consistent way to catch recursive crashes.
                         * We can still crash if the core_pattern binary sets
                         * RLIM_CORE = !1, but it runs as root, and can do
index dc400fd29f4d1c3c8e2265b4275aaabe4250e1fb..c71e3732e53bcebbffca749e65b7095fd4ff6e7e 100644 (file)
@@ -1659,9 +1659,25 @@ void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op)
 }
 EXPORT_SYMBOL(d_set_d_op);
 
+
+/*
+ * d_set_fallthru - Mark a dentry as falling through to a lower layer
+ * @dentry - The dentry to mark
+ *
+ * Mark a dentry as falling through to the lower layer (as set with
+ * d_pin_lower()).  This flag may be recorded on the medium.
+ */
+void d_set_fallthru(struct dentry *dentry)
+{
+       spin_lock(&dentry->d_lock);
+       dentry->d_flags |= DCACHE_FALLTHRU;
+       spin_unlock(&dentry->d_lock);
+}
+EXPORT_SYMBOL(d_set_fallthru);
+
 static unsigned d_flags_for_inode(struct inode *inode)
 {
-       unsigned add_flags = DCACHE_FILE_TYPE;
+       unsigned add_flags = DCACHE_REGULAR_TYPE;
 
        if (!inode)
                return DCACHE_MISS_TYPE;
@@ -1674,13 +1690,21 @@ static unsigned d_flags_for_inode(struct inode *inode)
                        else
                                inode->i_opflags |= IOP_LOOKUP;
                }
-       } else if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
-               if (unlikely(inode->i_op->follow_link))
+               goto type_determined;
+       }
+
+       if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) {
+               if (unlikely(inode->i_op->follow_link)) {
                        add_flags = DCACHE_SYMLINK_TYPE;
-               else
-                       inode->i_opflags |= IOP_NOFOLLOW;
+                       goto type_determined;
+               }
+               inode->i_opflags |= IOP_NOFOLLOW;
        }
 
+       if (unlikely(!S_ISREG(inode->i_mode)))
+               add_flags = DCACHE_SPECIAL_TYPE;
+
+type_determined:
        if (unlikely(IS_AUTOMOUNT(inode)))
                add_flags |= DCACHE_NEED_AUTOMOUNT;
        return add_flags;
@@ -1691,7 +1715,8 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
        unsigned add_flags = d_flags_for_inode(inode);
 
        spin_lock(&dentry->d_lock);
-       __d_set_type(dentry, add_flags);
+       dentry->d_flags &= ~(DCACHE_ENTRY_TYPE | DCACHE_FALLTHRU);
+       dentry->d_flags |= add_flags;
        if (inode)
                hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
        dentry->d_inode = inode;
index 45b18a5e225c3bc6fffe927cbad3257ccafc2109..96400ab42d135e7d572d29de9d4f3637fedd7817 100644 (file)
@@ -169,10 +169,19 @@ static int debugfs_show_options(struct seq_file *m, struct dentry *root)
        return 0;
 }
 
+static void debugfs_evict_inode(struct inode *inode)
+{
+       truncate_inode_pages_final(&inode->i_data);
+       clear_inode(inode);
+       if (S_ISLNK(inode->i_mode))
+               kfree(inode->i_private);
+}
+
 static const struct super_operations debugfs_super_operations = {
        .statfs         = simple_statfs,
        .remount_fs     = debugfs_remount,
        .show_options   = debugfs_show_options,
+       .evict_inode    = debugfs_evict_inode,
 };
 
 static struct vfsmount *debugfs_automount(struct path *path)
@@ -511,23 +520,14 @@ static int __debugfs_remove(struct dentry *dentry, struct dentry *parent)
        int ret = 0;
 
        if (debugfs_positive(dentry)) {
-               if (dentry->d_inode) {
-                       dget(dentry);
-                       switch (dentry->d_inode->i_mode & S_IFMT) {
-                       case S_IFDIR:
-                               ret = simple_rmdir(parent->d_inode, dentry);
-                               break;
-                       case S_IFLNK:
-                               kfree(dentry->d_inode->i_private);
-                               /* fall through */
-                       default:
-                               simple_unlink(parent->d_inode, dentry);
-                               break;
-                       }
-                       if (!ret)
-                               d_delete(dentry);
-                       dput(dentry);
-               }
+               dget(dentry);
+               if (S_ISDIR(dentry->d_inode->i_mode))
+                       ret = simple_rmdir(parent->d_inode, dentry);
+               else
+                       simple_unlink(parent->d_inode, dentry);
+               if (!ret)
+                       d_delete(dentry);
+               dput(dentry);
        }
        return ret;
 }
@@ -690,7 +690,7 @@ struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
        }
        d_move(old_dentry, dentry);
        fsnotify_move(old_dir->d_inode, new_dir->d_inode, old_name,
-               S_ISDIR(old_dentry->d_inode->i_mode),
+               d_is_dir(old_dentry),
                NULL, old_dentry);
        fsnotify_oldname_free(old_name);
        unlock_rename(new_dir, old_dir);
index 6f4e659f508f303bdadcc8922b2cc1a1397bb51b..b07731e68c0b4d39cf75a5840033638cf37c123f 100644 (file)
@@ -230,7 +230,7 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
        }
        ecryptfs_set_file_lower(
                file, ecryptfs_inode_to_private(inode)->lower_file);
-       if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) {
+       if (d_is_dir(ecryptfs_dentry)) {
                ecryptfs_printk(KERN_DEBUG, "This is a directory\n");
                mutex_lock(&crypt_stat->cs_mutex);
                crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
index 34b36a5040593e960ff3da4fdc20dcaa5c5cb8f6..b08b5187f6622cb6c7934d6dba9fcdfdbb0aefb6 100644 (file)
@@ -907,9 +907,9 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
        lower_inode = ecryptfs_inode_to_lower(inode);
        lower_dentry = ecryptfs_dentry_to_lower(dentry);
        mutex_lock(&crypt_stat->cs_mutex);
-       if (S_ISDIR(dentry->d_inode->i_mode))
+       if (d_is_dir(dentry))
                crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
-       else if (S_ISREG(dentry->d_inode->i_mode)
+       else if (d_is_reg(dentry)
                 && (!(crypt_stat->flags & ECRYPTFS_POLICY_APPLIED)
                     || !(crypt_stat->flags & ECRYPTFS_KEY_VALID))) {
                struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
index fdfd206c737a39d20853ebe069ac7dc706f61dd6..714cd37a6ba30fd970b8384a2c2b26fc5209351f 100644 (file)
@@ -429,7 +429,7 @@ struct dentry *exportfs_decode_fh(struct vfsmount *mnt, struct fid *fid,
        if (IS_ERR(result))
                return result;
 
-       if (S_ISDIR(result->d_inode->i_mode)) {
+       if (d_is_dir(result)) {
                /*
                 * This request is for a directory.
                 *
index 982d934fd9ac98338377d3b1621b3d577531b6e6..f63c3d5805c4c156ad3ed412cbecf85e700cf9d2 100644 (file)
@@ -364,7 +364,8 @@ struct flex_groups {
 #define EXT4_DIRTY_FL                  0x00000100
 #define EXT4_COMPRBLK_FL               0x00000200 /* One or more compressed clusters */
 #define EXT4_NOCOMPR_FL                        0x00000400 /* Don't compress */
-#define EXT4_ECOMPR_FL                 0x00000800 /* Compression error */
+       /* nb: was previously EXT2_ECOMPR_FL */
+#define EXT4_ENCRYPT_FL                        0x00000800 /* encrypted file */
 /* End compression flags --- maybe not all used */
 #define EXT4_INDEX_FL                  0x00001000 /* hash-indexed directory */
 #define EXT4_IMAGIC_FL                 0x00002000 /* AFS directory */
@@ -421,7 +422,7 @@ enum {
        EXT4_INODE_DIRTY        = 8,
        EXT4_INODE_COMPRBLK     = 9,    /* One or more compressed clusters */
        EXT4_INODE_NOCOMPR      = 10,   /* Don't compress */
-       EXT4_INODE_ECOMPR       = 11,   /* Compression error */
+       EXT4_INODE_ENCRYPT      = 11,   /* Encrypted file */
 /* End compression flags --- maybe not all used */
        EXT4_INODE_INDEX        = 12,   /* hash-indexed directory */
        EXT4_INODE_IMAGIC       = 13,   /* AFS directory */
@@ -466,7 +467,7 @@ static inline void ext4_check_flag_values(void)
        CHECK_FLAG_VALUE(DIRTY);
        CHECK_FLAG_VALUE(COMPRBLK);
        CHECK_FLAG_VALUE(NOCOMPR);
-       CHECK_FLAG_VALUE(ECOMPR);
+       CHECK_FLAG_VALUE(ENCRYPT);
        CHECK_FLAG_VALUE(INDEX);
        CHECK_FLAG_VALUE(IMAGIC);
        CHECK_FLAG_VALUE(JOURNAL_DATA);
@@ -1048,6 +1049,12 @@ extern void ext4_set_bits(void *bm, int cur, int len);
 /* Metadata checksum algorithm codes */
 #define EXT4_CRC32C_CHKSUM             1
 
+/* Encryption algorithms */
+#define EXT4_ENCRYPTION_MODE_INVALID           0
+#define EXT4_ENCRYPTION_MODE_AES_256_XTS       1
+#define EXT4_ENCRYPTION_MODE_AES_256_GCM       2
+#define EXT4_ENCRYPTION_MODE_AES_256_CBC       3
+
 /*
  * Structure of the super block
  */
@@ -1161,7 +1168,8 @@ struct ext4_super_block {
        __le32  s_grp_quota_inum;       /* inode for tracking group quota */
        __le32  s_overhead_clusters;    /* overhead blocks/clusters in fs */
        __le32  s_backup_bgs[2];        /* groups with sparse_super2 SBs */
-       __le32  s_reserved[106];        /* Padding to the end of the block */
+       __u8    s_encrypt_algos[4];     /* Encryption algorithms in use  */
+       __le32  s_reserved[105];        /* Padding to the end of the block */
        __le32  s_checksum;             /* crc32c(superblock) */
 };
 
@@ -1527,6 +1535,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
  * GDT_CSUM bits are mutually exclusive.
  */
 #define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM   0x0400
+#define EXT4_FEATURE_RO_COMPAT_READONLY                0x1000
 
 #define EXT4_FEATURE_INCOMPAT_COMPRESSION      0x0001
 #define EXT4_FEATURE_INCOMPAT_FILETYPE         0x0002
@@ -1542,6 +1551,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
 #define EXT4_FEATURE_INCOMPAT_BG_USE_META_CSUM 0x2000 /* use crc32c for bg */
 #define EXT4_FEATURE_INCOMPAT_LARGEDIR         0x4000 /* >2GB or 3-lvl htree */
 #define EXT4_FEATURE_INCOMPAT_INLINE_DATA      0x8000 /* data in inode */
+#define EXT4_FEATURE_INCOMPAT_ENCRYPT          0x10000
 
 #define EXT2_FEATURE_COMPAT_SUPP       EXT4_FEATURE_COMPAT_EXT_ATTR
 #define EXT2_FEATURE_INCOMPAT_SUPP     (EXT4_FEATURE_INCOMPAT_FILETYPE| \
index 6b9878a24182b06125cb496ef973ff0d9b739106..45fe924f82bce2ff76e3e74b45ec1833729433ea 100644 (file)
@@ -1401,10 +1401,7 @@ end_range:
                                 * to free. Everything was covered by the start
                                 * of the range.
                                 */
-                               return 0;
-                       } else {
-                               /* Shared branch grows from an indirect block */
-                               partial2--;
+                               goto do_indirects;
                        }
                } else {
                        /*
@@ -1435,56 +1432,96 @@ end_range:
        /* Punch happened within the same level (n == n2) */
        partial = ext4_find_shared(inode, n, offsets, chain, &nr);
        partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
-       /*
-        * ext4_find_shared returns Indirect structure which
-        * points to the last element which should not be
-        * removed by truncate. But this is end of the range
-        * in punch_hole so we need to point to the next element
-        */
-       partial2->p++;
-       while ((partial > chain) || (partial2 > chain2)) {
-               /* We're at the same block, so we're almost finished */
-               if ((partial->bh && partial2->bh) &&
-                   (partial->bh->b_blocknr == partial2->bh->b_blocknr)) {
-                       if ((partial > chain) && (partial2 > chain2)) {
+
+       /* Free top, but only if partial2 isn't its subtree. */
+       if (nr) {
+               int level = min(partial - chain, partial2 - chain2);
+               int i;
+               int subtree = 1;
+
+               for (i = 0; i <= level; i++) {
+                       if (offsets[i] != offsets2[i]) {
+                               subtree = 0;
+                               break;
+                       }
+               }
+
+               if (!subtree) {
+                       if (partial == chain) {
+                               /* Shared branch grows from the inode */
+                               ext4_free_branches(handle, inode, NULL,
+                                                  &nr, &nr+1,
+                                                  (chain+n-1) - partial);
+                               *partial->p = 0;
+                       } else {
+                               /* Shared branch grows from an indirect block */
+                               BUFFER_TRACE(partial->bh, "get_write_access");
                                ext4_free_branches(handle, inode, partial->bh,
-                                                  partial->p + 1,
-                                                  partial2->p,
+                                                  partial->p,
+                                                  partial->p+1,
                                                   (chain+n-1) - partial);
-                               BUFFER_TRACE(partial->bh, "call brelse");
-                               brelse(partial->bh);
-                               BUFFER_TRACE(partial2->bh, "call brelse");
-                               brelse(partial2->bh);
                        }
-                       return 0;
                }
+       }
+
+       if (!nr2) {
                /*
-                * Clear the ends of indirect blocks on the shared branch
-                * at the start of the range
+                * ext4_find_shared returns Indirect structure which
+                * points to the last element which should not be
+                * removed by truncate. But this is end of the range
+                * in punch_hole so we need to point to the next element
                 */
-               if (partial > chain) {
+               partial2->p++;
+       }
+
+       while (partial > chain || partial2 > chain2) {
+               int depth = (chain+n-1) - partial;
+               int depth2 = (chain2+n2-1) - partial2;
+
+               if (partial > chain && partial2 > chain2 &&
+                   partial->bh->b_blocknr == partial2->bh->b_blocknr) {
+                       /*
+                        * We've converged on the same block. Clear the range,
+                        * then we're done.
+                        */
                        ext4_free_branches(handle, inode, partial->bh,
-                                  partial->p + 1,
-                                  (__le32 *)partial->bh->b_data+addr_per_block,
-                                  (chain+n-1) - partial);
+                                          partial->p + 1,
+                                          partial2->p,
+                                          (chain+n-1) - partial);
                        BUFFER_TRACE(partial->bh, "call brelse");
                        brelse(partial->bh);
-                       partial--;
+                       BUFFER_TRACE(partial2->bh, "call brelse");
+                       brelse(partial2->bh);
+                       return 0;
                }
+
                /*
-                * Clear the ends of indirect blocks on the shared branch
-                * at the end of the range
+                * The start and end partial branches may not be at the same
+                * level even though the punch happened within one level. So, we
+                * give them a chance to arrive at the same level, then walk
+                * them in step with each other until we converge on the same
+                * block.
                 */
-               if (partial2 > chain2) {
+               if (partial > chain && depth <= depth2) {
+                       ext4_free_branches(handle, inode, partial->bh,
+                                          partial->p + 1,
+                                          (__le32 *)partial->bh->b_data+addr_per_block,
+                                          (chain+n-1) - partial);
+                       BUFFER_TRACE(partial->bh, "call brelse");
+                       brelse(partial->bh);
+                       partial--;
+               }
+               if (partial2 > chain2 && depth2 <= depth) {
                        ext4_free_branches(handle, inode, partial2->bh,
                                           (__le32 *)partial2->bh->b_data,
                                           partial2->p,
-                                          (chain2+n-1) - partial2);
+                                          (chain2+n2-1) - partial2);
                        BUFFER_TRACE(partial2->bh, "call brelse");
                        brelse(partial2->bh);
                        partial2--;
                }
        }
+       return 0;
 
 do_indirects:
        /* Kill the remaining (whole) subtrees */
index 85404f15e53a28860ce5a7be08220106b76fc06f..5cb9a212b86f3efd69ca604df07dc20b901dabb1 100644 (file)
@@ -1024,6 +1024,7 @@ static int ext4_write_end(struct file *file,
 {
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
+       loff_t old_size = inode->i_size;
        int ret = 0, ret2;
        int i_size_changed = 0;
 
@@ -1054,6 +1055,8 @@ static int ext4_write_end(struct file *file,
        unlock_page(page);
        page_cache_release(page);
 
+       if (old_size < pos)
+               pagecache_isize_extended(inode, old_size, pos);
        /*
         * Don't mark the inode dirty under page lock. First, it unnecessarily
         * makes the holding time of page lock longer. Second, it forces lock
@@ -1095,6 +1098,7 @@ static int ext4_journalled_write_end(struct file *file,
 {
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
+       loff_t old_size = inode->i_size;
        int ret = 0, ret2;
        int partial = 0;
        unsigned from, to;
@@ -1127,6 +1131,9 @@ static int ext4_journalled_write_end(struct file *file,
        unlock_page(page);
        page_cache_release(page);
 
+       if (old_size < pos)
+               pagecache_isize_extended(inode, old_size, pos);
+
        if (size_changed) {
                ret2 = ext4_mark_inode_dirty(handle, inode);
                if (!ret)
index 1adac6868e6fd0e97f91fa871ed45288fffc5cb6..e061e66c82800f700b7642e4c82fa2cc836be05f 100644 (file)
@@ -2779,6 +2779,12 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
        if (readonly)
                return 1;
 
+       if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_READONLY)) {
+               ext4_msg(sb, KERN_INFO, "filesystem is read-only");
+               sb->s_flags |= MS_RDONLY;
+               return 1;
+       }
+
        /* Check that feature set is OK for a read-write mount */
        if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP)) {
                ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
@@ -3936,9 +3942,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
        spin_lock_init(&sbi->s_next_gen_lock);
 
-       init_timer(&sbi->s_err_report);
-       sbi->s_err_report.function = print_daily_error_info;
-       sbi->s_err_report.data = (unsigned long) sb;
+       setup_timer(&sbi->s_err_report, print_daily_error_info,
+               (unsigned long) sb);
 
        /* Register extent status tree shrinker */
        if (ext4_es_register_shrinker(sbi))
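The timer hunk above is a mechanical conversion; as a rough equivalence (sketch only; setup_timer() initializes the timer and sets .function/.data in one call):

	/* open-coded form being removed */
	init_timer(&sbi->s_err_report);
	sbi->s_err_report.function = print_daily_error_info;
	sbi->s_err_report.data = (unsigned long) sb;

	/* single-call form being added */
	setup_timer(&sbi->s_err_report, print_daily_error_info, (unsigned long) sb);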
@@ -4866,9 +4871,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        if (sbi->s_journal && sbi->s_journal->j_task->io_context)
                journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
 
-       /*
-        * Allow the "check" option to be passed as a remount option.
-        */
        if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
                err = -EINVAL;
                goto restore_opts;
@@ -4877,17 +4879,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
            test_opt(sb, JOURNAL_CHECKSUM)) {
                ext4_msg(sb, KERN_ERR, "changing journal_checksum "
-                        "during remount not supported");
-               err = -EINVAL;
-               goto restore_opts;
-       }
-
-       if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
-           test_opt(sb, JOURNAL_CHECKSUM)) {
-               ext4_msg(sb, KERN_ERR, "changing journal_checksum "
-                        "during remount not supported");
-               err = -EINVAL;
-               goto restore_opts;
+                        "during remount not supported; ignoring");
+               sbi->s_mount_opt ^= EXT4_MOUNT_JOURNAL_CHECKSUM;
        }
 
        if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
@@ -4963,7 +4956,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
                                ext4_mark_recovery_complete(sb, es);
                } else {
                        /* Make sure we can mount this feature set readwrite */
-                       if (!ext4_feature_set_ok(sb, 0)) {
+                       if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
+                                       EXT4_FEATURE_RO_COMPAT_READONLY) ||
+                           !ext4_feature_set_ok(sb, 0)) {
                                err = -EROFS;
                                goto restore_opts;
                        }
index 073657f755d4a5b9c0d08acca991fbd35a9f37f0..e907052eeadb69f683df3c7d8838106c6e36905b 100644 (file)
@@ -769,9 +769,9 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
                struct inode *inode = wb_inode(wb->b_io.prev);
                struct super_block *sb = inode->i_sb;
 
-               if (!grab_super_passive(sb)) {
+               if (!trylock_super(sb)) {
                        /*
-                        * grab_super_passive() may fail consistently due to
+                        * trylock_super() may fail consistently due to
                         * s_umount being grabbed by someone else. Don't use
                         * requeue_io() to avoid busy retrying the inode/sb.
                         */
@@ -779,7 +779,7 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
                        continue;
                }
                wrote += writeback_sb_inodes(sb, wb, work);
-               drop_super(sb);
+               up_read(&sb->s_umount);
 
                /* refer to the same tests at the end of writeback_sb_inodes */
                if (wrote) {
index 08e7b1a9d5d0edaca8b94ef386d9200078958df3..1545b711ddcfdc925410b3b553b6597b93c71a25 100644 (file)
@@ -971,7 +971,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
                        err = -EBUSY;
                        goto badentry;
                }
-               if (S_ISDIR(entry->d_inode->i_mode)) {
+               if (d_is_dir(entry)) {
                        shrink_dcache_parent(entry);
                        if (!simple_empty(entry)) {
                                err = -ENOTEMPTY;
index 6371192961e2260cbbb9976ad1c636d89b92cee6..487527b42d94a381d329d8be5ae4459cafabe15b 100644 (file)
@@ -1809,7 +1809,7 @@ int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
                gfs2_consist_inode(dip);
        dip->i_entries--;
        dip->i_inode.i_mtime = dip->i_inode.i_ctime = tv;
-       if (S_ISDIR(dentry->d_inode->i_mode))
+       if (d_is_dir(dentry))
                drop_nlink(&dip->i_inode);
        mark_inode_dirty(&dip->i_inode);
 
index 435bea231cc6e83031976a3974bbf24d81a0b8ab..f0235c1640af7ec29edb92541f66dfc739c9b0db 100644 (file)
@@ -530,7 +530,7 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry,
 
        /* Unlink destination if it already exists */
        if (new_dentry->d_inode) {
-               if (S_ISDIR(new_dentry->d_inode->i_mode))
+               if (d_is_dir(new_dentry))
                        res = hfsplus_rmdir(new_dir, new_dentry);
                else
                        res = hfsplus_unlink(new_dir, new_dentry);
index 5f2755117ce775dea45cb5c2bbc369568f991f29..043ac9d77262a858464adad185969109902c9479 100644 (file)
@@ -678,10 +678,10 @@ static struct inode *get_inode(struct super_block *sb, struct dentry *dentry)
                return NULL;
        }
 
-       if (S_ISDIR(dentry->d_inode->i_mode)) {
+       if (d_is_dir(dentry)) {
                inode->i_op = &hppfs_dir_iops;
                inode->i_fop = &hppfs_dir_fops;
-       } else if (S_ISLNK(dentry->d_inode->i_mode)) {
+       } else if (d_is_symlink(dentry)) {
                inode->i_op = &hppfs_link_iops;
                inode->i_fop = &hppfs_file_fops;
        } else {
index 30459dab409dd5f8b08f22d69b589f9c91dd2678..01dce1d1476b7bc93633f787e989c464c5f2ef58 100644 (file)
@@ -84,7 +84,7 @@ extern struct file *get_empty_filp(void);
  * super.c
  */
 extern int do_remount_sb(struct super_block *, int, void *, int);
-extern bool grab_super_passive(struct super_block *sb);
+extern bool trylock_super(struct super_block *sb);
 extern struct dentry *mount_fs(struct file_system_type *,
                               int, const char *, void *);
 extern struct super_block *user_get_super(dev_t);
index bcbef08a4d8fc8873994eb37d35881626f05af70..b5128c6e63ad6644d19bf861a062d63f48265a4d 100644 (file)
@@ -524,6 +524,9 @@ static int do_one_pass(journal_t *journal,
                        if (descr_csum_size > 0 &&
                            !jbd2_descr_block_csum_verify(journal,
                                                          bh->b_data)) {
+                               printk(KERN_ERR "JBD2: Invalid checksum "
+                                      "recovering block %lu in log\n",
+                                      next_log_block);
                                err = -EIO;
                                brelse(bh);
                                goto failed;
index 938556025d643349b5166e54b5cfc55ea4767dbe..f21b6fb5e4c42f219022edb51c84a0d0674bd4d2 100644 (file)
@@ -252,7 +252,7 @@ static int jffs2_link (struct dentry *old_dentry, struct inode *dir_i, struct de
        if (!f->inocache)
                return -EIO;
 
-       if (S_ISDIR(old_dentry->d_inode->i_mode))
+       if (d_is_dir(old_dentry))
                return -EPERM;
 
        /* XXX: This is ugly */
@@ -772,7 +772,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
         */
        if (new_dentry->d_inode) {
                victim_f = JFFS2_INODE_INFO(new_dentry->d_inode);
-               if (S_ISDIR(new_dentry->d_inode->i_mode)) {
+               if (d_is_dir(new_dentry)) {
                        struct jffs2_full_dirent *fd;
 
                        mutex_lock(&victim_f->sem);
@@ -807,7 +807,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
 
        if (victim_f) {
                /* There was a victim. Kill it off nicely */
-               if (S_ISDIR(new_dentry->d_inode->i_mode))
+               if (d_is_dir(new_dentry))
                        clear_nlink(new_dentry->d_inode);
                else
                        drop_nlink(new_dentry->d_inode);
@@ -815,7 +815,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
                   inode which didn't exist. */
                if (victim_f->inocache) {
                        mutex_lock(&victim_f->sem);
-                       if (S_ISDIR(new_dentry->d_inode->i_mode))
+                       if (d_is_dir(new_dentry))
                                victim_f->inocache->pino_nlink = 0;
                        else
                                victim_f->inocache->pino_nlink--;
@@ -825,7 +825,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
 
        /* If it was a directory we moved, and there was no victim,
           increase i_nlink on its new parent */
-       if (S_ISDIR(old_dentry->d_inode->i_mode) && !victim_f)
+       if (d_is_dir(old_dentry) && !victim_f)
                inc_nlink(new_dir_i);
 
        /* Unlink the original */
@@ -839,7 +839,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
                struct jffs2_inode_info *f = JFFS2_INODE_INFO(old_dentry->d_inode);
                mutex_lock(&f->sem);
                inc_nlink(old_dentry->d_inode);
-               if (f->inocache && !S_ISDIR(old_dentry->d_inode->i_mode))
+               if (f->inocache && !d_is_dir(old_dentry))
                        f->inocache->pino_nlink++;
                mutex_unlock(&f->sem);
 
@@ -852,7 +852,7 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry,
                return ret;
        }
 
-       if (S_ISDIR(old_dentry->d_inode->i_mode))
+       if (d_is_dir(old_dentry))
                drop_nlink(old_dir_i);
 
        new_dir_i->i_mtime = new_dir_i->i_ctime = old_dir_i->i_mtime = old_dir_i->i_ctime = ITIME(now);
index 0918f0e2e26608467356235c4267c336cccee87c..3d76f28a2ba9dc1d9e4b1d5b9dfde57af1b983e6 100644 (file)
@@ -138,7 +138,7 @@ static struct dentry *jffs2_get_parent(struct dentry *child)
        struct jffs2_inode_info *f;
        uint32_t pino;
 
-       BUG_ON(!S_ISDIR(child->d_inode->i_mode));
+       BUG_ON(!d_is_dir(child));
 
        f = JFFS2_INODE_INFO(child->d_inode);
 
index b2ffdb045be42c1c5c476d9298aacd98045f47f3..0ab65122ee45405bfcc96463a3de2b228b36e283 100644 (file)
@@ -329,7 +329,7 @@ int simple_rename(struct inode *old_dir, struct dentry *old_dentry,
                struct inode *new_dir, struct dentry *new_dentry)
 {
        struct inode *inode = old_dentry->d_inode;
-       int they_are_dirs = S_ISDIR(old_dentry->d_inode->i_mode);
+       int they_are_dirs = d_is_dir(old_dentry);
 
        if (!simple_empty(new_dentry))
                return -ENOTEMPTY;
index 96ca11dea4a20c56b89ca126274b68089235346f..c83145af4bfc0ea9bb159002e3545e8a8cd65157 100644 (file)
@@ -2814,7 +2814,7 @@ no_open:
                        } else if (!dentry->d_inode) {
                                goto out;
                        } else if ((open_flag & O_TRUNC) &&
-                                  S_ISREG(dentry->d_inode->i_mode)) {
+                                  d_is_reg(dentry)) {
                                goto out;
                        }
                        /* will fail later, go on to get the right error */
index 72a286e0d33eb37a2ff3cc8a33f7ca5cefcca266..82ef1405260e1cfbe551ffba781fec25f881e918 100644 (file)
@@ -1907,8 +1907,8 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
        if (mnt->mnt.mnt_sb->s_flags & MS_NOUSER)
                return -EINVAL;
 
-       if (S_ISDIR(mp->m_dentry->d_inode->i_mode) !=
-             S_ISDIR(mnt->mnt.mnt_root->d_inode->i_mode))
+       if (d_is_dir(mp->m_dentry) !=
+             d_is_dir(mnt->mnt.mnt_root))
                return -ENOTDIR;
 
        return attach_recursive_mnt(mnt, p, mp, NULL);
@@ -2180,8 +2180,8 @@ static int do_move_mount(struct path *path, const char *old_name)
        if (!mnt_has_parent(old))
                goto out1;
 
-       if (S_ISDIR(path->dentry->d_inode->i_mode) !=
-             S_ISDIR(old_path.dentry->d_inode->i_mode))
+       if (d_is_dir(path->dentry) !=
+             d_is_dir(old_path.dentry))
                goto out1;
        /*
         * Don't move a mount residing in a shared parent.
@@ -2271,7 +2271,7 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
                goto unlock;
 
        err = -EINVAL;
-       if (S_ISLNK(newmnt->mnt.mnt_root->d_inode->i_mode))
+       if (d_is_symlink(newmnt->mnt.mnt_root))
                goto unlock;
 
        newmnt->mnt.mnt_flags = mnt_flags;
index e36a9d78ea49adc63329253a0a94cb22d6be933a..197806fb87ffb459c19f3c4bbc8da50c58c870dc 100644 (file)
@@ -427,6 +427,8 @@ __be32 nfs4_callback_sequence(struct cb_sequenceargs *args,
        if (clp == NULL)
                goto out;
 
+       if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
+               goto out;
        tbl = &clp->cl_session->bc_slot_table;
 
        spin_lock(&tbl->slot_tbl_lock);
index f4ccfe6521ec80f80fd4b9096bcc7ed49ab7d795..19ca95cdfd9b0f26aedbbc23f036babf2aeca67a 100644 (file)
@@ -313,7 +313,7 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
                goto out;
        }
 
-       args->devs = kmalloc(n * sizeof(*args->devs), GFP_KERNEL);
+       args->devs = kmalloc_array(n, sizeof(*args->devs), GFP_KERNEL);
        if (!args->devs) {
                status = htonl(NFS4ERR_DELAY);
                goto out;
@@ -415,7 +415,7 @@ static __be32 decode_rc_list(struct xdr_stream *xdr,
                             rc_list->rcl_nrefcalls * 2 * sizeof(uint32_t));
                if (unlikely(p == NULL))
                        goto out;
-               rc_list->rcl_refcalls = kmalloc(rc_list->rcl_nrefcalls *
+               rc_list->rcl_refcalls = kmalloc_array(rc_list->rcl_nrefcalls,
                                                sizeof(*rc_list->rcl_refcalls),
                                                GFP_KERNEL);
                if (unlikely(rc_list->rcl_refcalls == NULL))
@@ -464,8 +464,10 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
 
                for (i = 0; i < args->csa_nrclists; i++) {
                        status = decode_rc_list(xdr, &args->csa_rclists[i]);
-                       if (status)
+                       if (status) {
+                               args->csa_nrclists = i;
                                goto out_free;
+                       }
                }
        }
        status = 0;
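The two allocation changes in this file trade an open-coded size multiplication for kmalloc_array(); a sketch of the difference (illustrative, not the callback_xdr.c context):

	/* n comes off the wire: n * sizeof(*devs) can overflow and under-allocate */
	devs = kmalloc(n * sizeof(*devs), GFP_KERNEL);

	/* kmalloc_array() checks the multiplication and returns NULL on overflow */
	devs = kmalloc_array(n, sizeof(*devs), GFP_KERNEL);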
index da5433230bb1960bc78bafc17ced89f3bca76b65..a1f0685b42ff7d2e42eed249e178ecdbbee7befc 100644 (file)
@@ -180,7 +180,6 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred,
                        delegation->cred = get_rpccred(cred);
                        clear_bit(NFS_DELEGATION_NEED_RECLAIM,
                                  &delegation->flags);
-                       NFS_I(inode)->delegation_state = delegation->type;
                        spin_unlock(&delegation->lock);
                        put_rpccred(oldcred);
                        rcu_read_unlock();
@@ -275,7 +274,6 @@ nfs_detach_delegation_locked(struct nfs_inode *nfsi,
        set_bit(NFS_DELEGATION_RETURNING, &delegation->flags);
        list_del_rcu(&delegation->super_list);
        delegation->inode = NULL;
-       nfsi->delegation_state = 0;
        rcu_assign_pointer(nfsi->delegation, NULL);
        spin_unlock(&delegation->lock);
        return delegation;
@@ -355,7 +353,6 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
                                        &delegation->stateid)) {
                        nfs_update_inplace_delegation(old_delegation,
                                        delegation);
-                       nfsi->delegation_state = old_delegation->type;
                        goto out;
                }
                /*
@@ -379,7 +376,6 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
                        goto out;
        }
        list_add_rcu(&delegation->super_list, &server->delegations);
-       nfsi->delegation_state = delegation->type;
        rcu_assign_pointer(nfsi->delegation, delegation);
        delegation = NULL;
 
index 7077521acdf4609cc57f54da4ce11bb331493632..e907c8cf732e3cff6bc9711ccf0b20c9261cdca2 100644 (file)
@@ -283,7 +283,7 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
 void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
                              struct nfs_direct_req *dreq)
 {
-       cinfo->lock = &dreq->lock;
+       cinfo->lock = &dreq->inode->i_lock;
        cinfo->mds = &dreq->mds_cinfo;
        cinfo->ds = &dreq->ds_cinfo;
        cinfo->dreq = dreq;
index 7ae1c263c5cf03b8d63f1fec359d794174564020..91e88a7ecef0c64354b0278fc01381e0970d2086 100644 (file)
@@ -960,52 +960,19 @@ filelayout_mark_request_commit(struct nfs_page *req,
 {
        struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
        u32 i, j;
-       struct list_head *list;
-       struct pnfs_commit_bucket *buckets;
 
        if (fl->commit_through_mds) {
-               list = &cinfo->mds->list;
-               spin_lock(cinfo->lock);
-               goto mds_commit;
-       }
-
-       /* Note that we are calling nfs4_fl_calc_j_index on each page
-        * that ends up being committed to a data server.  An attractive
-        * alternative is to add a field to nfs_write_data and nfs_page
-        * to store the value calculated in filelayout_write_pagelist
-        * and just use that here.
-        */
-       j = nfs4_fl_calc_j_index(lseg, req_offset(req));
-       i = select_bucket_index(fl, j);
-       spin_lock(cinfo->lock);
-       buckets = cinfo->ds->buckets;
-       list = &buckets[i].written;
-       if (list_empty(list)) {
-               /* Non-empty buckets hold a reference on the lseg.  That ref
-                * is normally transferred to the COMMIT call and released
-                * there.  It could also be released if the last req is pulled
-                * off due to a rewrite, in which case it will be done in
-                * pnfs_generic_clear_request_commit
+               nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
+       } else {
+               /* Note that we are calling nfs4_fl_calc_j_index on each page
+                * that ends up being committed to a data server.  An attractive
+                * alternative is to add a field to nfs_write_data and nfs_page
+                * to store the value calculated in filelayout_write_pagelist
+                * and just use that here.
                 */
-               buckets[i].wlseg = pnfs_get_lseg(lseg);
-       }
-       set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
-       cinfo->ds->nwritten++;
-
-mds_commit:
-       /* nfs_request_add_commit_list(). We need to add req to list without
-        * dropping cinfo lock.
-        */
-       set_bit(PG_CLEAN, &(req)->wb_flags);
-       nfs_list_add_request(req, list);
-       cinfo->mds->ncommit++;
-       spin_unlock(cinfo->lock);
-       if (!cinfo->dreq) {
-               inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-               inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-                            BDI_RECLAIMABLE);
-               __mark_inode_dirty(req->wb_context->dentry->d_inode,
-                                  I_DIRTY_DATASYNC);
+               j = nfs4_fl_calc_j_index(lseg, req_offset(req));
+               i = select_bucket_index(fl, j);
+               pnfs_layout_mark_request_commit(req, lseg, cinfo, i);
        }
 }
 
index c22ecaa86c1c27cc2138f1853c27757b11104f17..315cc68945b9d1d3ad00b04e2597e38d7b7c4cab 100644 (file)
@@ -1332,47 +1332,6 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
        return PNFS_ATTEMPTED;
 }
 
-static void
-ff_layout_mark_request_commit(struct nfs_page *req,
-                             struct pnfs_layout_segment *lseg,
-                             struct nfs_commit_info *cinfo,
-                             u32 ds_commit_idx)
-{
-       struct list_head *list;
-       struct pnfs_commit_bucket *buckets;
-
-       spin_lock(cinfo->lock);
-       buckets = cinfo->ds->buckets;
-       list = &buckets[ds_commit_idx].written;
-       if (list_empty(list)) {
-               /* Non-empty buckets hold a reference on the lseg.  That ref
-                * is normally transferred to the COMMIT call and released
-                * there.  It could also be released if the last req is pulled
-                * off due to a rewrite, in which case it will be done in
-                * pnfs_common_clear_request_commit
-                */
-               WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
-               buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
-       }
-       set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
-       cinfo->ds->nwritten++;
-
-       /* nfs_request_add_commit_list(). We need to add req to list without
-        * dropping cinfo lock.
-        */
-       set_bit(PG_CLEAN, &(req)->wb_flags);
-       nfs_list_add_request(req, list);
-       cinfo->mds->ncommit++;
-       spin_unlock(cinfo->lock);
-       if (!cinfo->dreq) {
-               inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-               inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-                            BDI_RECLAIMABLE);
-               __mark_inode_dirty(req->wb_context->dentry->d_inode,
-                                  I_DIRTY_DATASYNC);
-       }
-}
-
 static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
 {
        return i;
@@ -1540,7 +1499,7 @@ static struct pnfs_layoutdriver_type flexfilelayout_type = {
        .pg_write_ops           = &ff_layout_pg_write_ops,
        .get_ds_info            = ff_layout_get_ds_info,
        .free_deviceid_node     = ff_layout_free_deveiceid_node,
-       .mark_request_commit    = ff_layout_mark_request_commit,
+       .mark_request_commit    = pnfs_layout_mark_request_commit,
        .clear_request_commit   = pnfs_generic_clear_request_commit,
        .scan_commit_lists      = pnfs_generic_scan_commit_lists,
        .recover_commit_reqs    = pnfs_generic_recover_commit_reqs,
index e4f0dcef8f5455e60676bf70d9d0489107ad7c79..83107be3dd0109ab54c5f2b0c72fa59c64dd8d57 100644 (file)
@@ -1775,7 +1775,6 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
 #if IS_ENABLED(CONFIG_NFS_V4)
        INIT_LIST_HEAD(&nfsi->open_states);
        nfsi->delegation = NULL;
-       nfsi->delegation_state = 0;
        init_rwsem(&nfsi->rwsem);
        nfsi->layout = NULL;
 #endif
index 212b8c883d22881b4258c2c7f7e611d9d85f0427..b802fb3a2d99ffd76a57aecc6ce6739ca104e515 100644 (file)
@@ -597,6 +597,19 @@ void nfs_super_set_maxbytes(struct super_block *sb, __u64 maxfilesize)
                sb->s_maxbytes = MAX_LFS_FILESIZE;
 }
 
+/*
+ * Record the page as unstable and mark its inode as dirty.
+ */
+static inline
+void nfs_mark_page_unstable(struct page *page)
+{
+       struct inode *inode = page_file_mapping(page)->host;
+
+       inc_zone_page_state(page, NR_UNSTABLE_NFS);
+       inc_bdi_stat(inode_to_bdi(inode), BDI_RECLAIMABLE);
+       __mark_inode_dirty(inode, I_DIRTY_DATASYNC);
+}
+
 /*
  * Determine the number of bytes of data the page contains
  */
index 2e7c9f7a6f7cc8369bfcefda7a202d34f7a9dbe5..88180ac5ea0eebdf34aa333130e69d7a7b49c38e 100644 (file)
@@ -6648,47 +6648,47 @@ nfs41_same_server_scope(struct nfs41_server_scope *a,
 int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
 {
        int status;
+       struct nfs41_bind_conn_to_session_args args = {
+               .client = clp,
+               .dir = NFS4_CDFC4_FORE_OR_BOTH,
+       };
        struct nfs41_bind_conn_to_session_res res;
        struct rpc_message msg = {
                .rpc_proc =
                        &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
-               .rpc_argp = clp,
+               .rpc_argp = &args,
                .rpc_resp = &res,
                .rpc_cred = cred,
        };
 
        dprintk("--> %s\n", __func__);
 
-       res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
-       if (unlikely(res.session == NULL)) {
-               status = -ENOMEM;
-               goto out;
-       }
+       nfs4_copy_sessionid(&args.sessionid, &clp->cl_session->sess_id);
+       if (!(clp->cl_session->flags & SESSION4_BACK_CHAN))
+               args.dir = NFS4_CDFC4_FORE;
 
        status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
        trace_nfs4_bind_conn_to_session(clp, status);
        if (status == 0) {
-               if (memcmp(res.session->sess_id.data,
+               if (memcmp(res.sessionid.data,
                    clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
                        dprintk("NFS: %s: Session ID mismatch\n", __func__);
                        status = -EIO;
-                       goto out_session;
+                       goto out;
                }
-               if (res.dir != NFS4_CDFS4_BOTH) {
+               if ((res.dir & args.dir) != res.dir || res.dir == 0) {
                        dprintk("NFS: %s: Unexpected direction from server\n",
                                __func__);
                        status = -EIO;
-                       goto out_session;
+                       goto out;
                }
-               if (res.use_conn_in_rdma_mode) {
+               if (res.use_conn_in_rdma_mode != args.use_conn_in_rdma_mode) {
                        dprintk("NFS: %s: Server returned RDMA mode = true\n",
                                __func__);
                        status = -EIO;
-                       goto out_session;
+                       goto out;
                }
        }
-out_session:
-       kfree(res.session);
 out:
        dprintk("<-- %s status= %d\n", __func__, status);
        return status;
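/*
 * Illustrative sketch, not part of the patch above: the rewritten check
 * accepts any direction the server returns as long as it is a non-empty
 * subset of what the client requested, instead of insisting on "both".
 * Constant values follow the channel-direction flags in RFC 5661
 * (FORE = 1, BACK = 2, FORE_OR_BOTH = 3); treat the names here as
 * stand-ins for the kernel's NFS4_CDFC4_* enum.
 */
#include <stdio.h>

#define CDFC4_FORE         0x1
#define CDFC4_BACK         0x2
#define CDFC4_FORE_OR_BOTH 0x3

/* Return 1 if the server's reply direction is acceptable for our request. */
static int bind_dir_ok(unsigned int requested, unsigned int replied)
{
	return replied != 0 && (replied & requested) == replied;
}

int main(void)
{
	printf("%d\n", bind_dir_ok(CDFC4_FORE_OR_BOTH, CDFC4_FORE));         /* 1 */
	printf("%d\n", bind_dir_ok(CDFC4_FORE_OR_BOTH, CDFC4_FORE_OR_BOTH)); /* 1 */
	printf("%d\n", bind_dir_ok(CDFC4_FORE, CDFC4_BACK));                 /* 0 */
	printf("%d\n", bind_dir_ok(CDFC4_FORE_OR_BOTH, 0));                  /* 0 */
	return 0;
}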
@@ -7166,10 +7166,11 @@ static void nfs4_init_channel_attrs(struct nfs41_create_session_args *args)
                args->bc_attrs.max_reqs);
 }
 
-static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
+static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args,
+               struct nfs41_create_session_res *res)
 {
        struct nfs4_channel_attrs *sent = &args->fc_attrs;
-       struct nfs4_channel_attrs *rcvd = &session->fc_attrs;
+       struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
 
        if (rcvd->max_resp_sz > sent->max_resp_sz)
                return -EINVAL;
@@ -7188,11 +7189,14 @@ static int nfs4_verify_fore_channel_attrs(struct nfs41_create_session_args *args
        return 0;
 }
 
-static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args, struct nfs4_session *session)
+static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args,
+               struct nfs41_create_session_res *res)
 {
        struct nfs4_channel_attrs *sent = &args->bc_attrs;
-       struct nfs4_channel_attrs *rcvd = &session->bc_attrs;
+       struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
 
+       if (!(res->flags & SESSION4_BACK_CHAN))
+               goto out;
        if (rcvd->max_rqst_sz > sent->max_rqst_sz)
                return -EINVAL;
        if (rcvd->max_resp_sz < sent->max_resp_sz)
@@ -7204,18 +7208,30 @@ static int nfs4_verify_back_channel_attrs(struct nfs41_create_session_args *args
                return -EINVAL;
        if (rcvd->max_reqs != sent->max_reqs)
                return -EINVAL;
+out:
        return 0;
 }
 
 static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
-                                    struct nfs4_session *session)
+                                    struct nfs41_create_session_res *res)
 {
        int ret;
 
-       ret = nfs4_verify_fore_channel_attrs(args, session);
+       ret = nfs4_verify_fore_channel_attrs(args, res);
        if (ret)
                return ret;
-       return nfs4_verify_back_channel_attrs(args, session);
+       return nfs4_verify_back_channel_attrs(args, res);
+}
+
+static void nfs4_update_session(struct nfs4_session *session,
+               struct nfs41_create_session_res *res)
+{
+       nfs4_copy_sessionid(&session->sess_id, &res->sessionid);
+       session->flags = res->flags;
+       memcpy(&session->fc_attrs, &res->fc_attrs, sizeof(session->fc_attrs));
+       if (res->flags & SESSION4_BACK_CHAN)
+               memcpy(&session->bc_attrs, &res->bc_attrs,
+                               sizeof(session->bc_attrs));
 }
 
 static int _nfs4_proc_create_session(struct nfs_client *clp,
@@ -7224,11 +7240,12 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
        struct nfs4_session *session = clp->cl_session;
        struct nfs41_create_session_args args = {
                .client = clp,
+               .clientid = clp->cl_clientid,
+               .seqid = clp->cl_seqid,
                .cb_program = NFS4_CALLBACK,
        };
-       struct nfs41_create_session_res res = {
-               .client = clp,
-       };
+       struct nfs41_create_session_res res;
+
        struct rpc_message msg = {
                .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
                .rpc_argp = &args,
@@ -7245,11 +7262,15 @@ static int _nfs4_proc_create_session(struct nfs_client *clp,
 
        if (!status) {
                /* Verify the session's negotiated channel_attrs values */
-               status = nfs4_verify_channel_attrs(&args, session);
+               status = nfs4_verify_channel_attrs(&args, &res);
                /* Increment the clientid slot sequence id */
-               clp->cl_seqid++;
+               if (clp->cl_seqid == res.seqid)
+                       clp->cl_seqid++;
+               if (status)
+                       goto out;
+               nfs4_update_session(session, &res);
        }
-
+out:
        return status;
 }
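/*
 * Illustrative sketch, not part of the patch above: CREATE_SESSION replies
 * are now decoded into a local result struct and copied into the live
 * nfs4_session only after the negotiated attributes pass verification, so a
 * bogus reply can no longer clobber session state.  The structs and the
 * single check below are simplified stand-ins, not the kernel's types.
 */
#include <stdio.h>
#include <string.h>

struct chan_attrs  { unsigned int max_reqs; };
struct session     { struct chan_attrs fc_attrs; unsigned int flags; };
struct session_res { struct chan_attrs fc_attrs; unsigned int flags; };

/* Reject a reply that grants more slots than we proposed (simplified rule). */
static int verify_attrs(const struct chan_attrs *sent, const struct chan_attrs *rcvd)
{
	return rcvd->max_reqs <= sent->max_reqs ? 0 : -1;
}

/* Commit the verified reply into the live session in one place. */
static void update_session(struct session *s, const struct session_res *res)
{
	s->flags = res->flags;
	memcpy(&s->fc_attrs, &res->fc_attrs, sizeof(s->fc_attrs));
}

int main(void)
{
	struct chan_attrs sent = { .max_reqs = 64 };	/* what the client offered */
	struct session live = { { 0 }, 0 };
	struct session_res res = { { 32 }, 1 };		/* decoded server reply    */

	if (verify_attrs(&sent, &res.fc_attrs) == 0)
		update_session(&live, &res);
	printf("max_reqs=%u flags=%u\n", live.fc_attrs.max_reqs, live.flags);
	return 0;
}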
 
index e799dc3c3b1db9f7681199907bff5e7bffb3f853..e23366effcfb1e43bcb81983bcbaeacf2e512002 100644 (file)
@@ -450,7 +450,7 @@ int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
        tbl = &ses->fc_slot_table;
        tbl->session = ses;
        status = nfs4_realloc_slot_table(tbl, ses->fc_attrs.max_reqs, 1);
-       if (status) /* -ENOMEM */
+       if (status || !(ses->flags & SESSION4_BACK_CHAN)) /* -ENOMEM */
                return status;
        /* Back channel */
        tbl = &ses->bc_slot_table;
index b34ada9bc6a2d03a677e46cc5ab8c4eb7c1fd838..fc46c745589863425bff271942b76f7812db5aea 100644 (file)
@@ -118,6 +118,12 @@ static inline int nfs4_has_persistent_session(const struct nfs_client *clp)
        return 0;
 }
 
+static inline void nfs4_copy_sessionid(struct nfs4_sessionid *dst,
+               const struct nfs4_sessionid *src)
+{
+       memcpy(dst->data, src->data, NFS4_MAX_SESSIONID_LEN);
+}
+
 #ifdef CONFIG_CRC32
 /*
  * nfs_session_id_hash - calculate the crc32 hash for the session id
index e23a0a664e12d5130162bc320e80a57a5264bc9b..5c399ec41079687791d2fb668d5f6543ce374a2f 100644 (file)
@@ -1715,17 +1715,17 @@ static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, stru
 #if defined(CONFIG_NFS_V4_1)
 /* NFSv4.1 operations */
 static void encode_bind_conn_to_session(struct xdr_stream *xdr,
-                                  struct nfs4_session *session,
+                                  struct nfs41_bind_conn_to_session_args *args,
                                   struct compound_hdr *hdr)
 {
        __be32 *p;
 
        encode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION,
                decode_bind_conn_to_session_maxsz, hdr);
-       encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
+       encode_opaque_fixed(xdr, args->sessionid.data, NFS4_MAX_SESSIONID_LEN);
        p = xdr_reserve_space(xdr, 8);
-       *p++ = cpu_to_be32(NFS4_CDFC4_BACK_OR_BOTH);
-       *p = 0; /* use_conn_in_rdma_mode = False */
+       *p++ = cpu_to_be32(args->dir);
+       *p = (args->use_conn_in_rdma_mode) ? cpu_to_be32(1) : cpu_to_be32(0);
 }
 
 static void encode_op_map(struct xdr_stream *xdr, struct nfs4_op_map *op_map)
@@ -1806,8 +1806,8 @@ static void encode_create_session(struct xdr_stream *xdr,
 
        encode_op_hdr(xdr, OP_CREATE_SESSION, decode_create_session_maxsz, hdr);
        p = reserve_space(xdr, 16 + 2*28 + 20 + clnt->cl_nodelen + 12);
-       p = xdr_encode_hyper(p, clp->cl_clientid);
-       *p++ = cpu_to_be32(clp->cl_seqid);                      /*Sequence id */
+       p = xdr_encode_hyper(p, args->clientid);
+       *p++ = cpu_to_be32(args->seqid);                        /*Sequence id */
        *p++ = cpu_to_be32(args->flags);                        /*flags */
 
        /* Fore Channel */
@@ -2734,14 +2734,14 @@ static void nfs4_xdr_enc_fsid_present(struct rpc_rqst *req,
  */
 static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req,
                                struct xdr_stream *xdr,
-                               struct nfs_client *clp)
+                               struct nfs41_bind_conn_to_session_args *args)
 {
        struct compound_hdr hdr = {
-               .minorversion = clp->cl_mvops->minor_version,
+               .minorversion = args->client->cl_mvops->minor_version,
        };
 
        encode_compound_hdr(xdr, req, &hdr);
-       encode_bind_conn_to_session(xdr, clp->cl_session, &hdr);
+       encode_bind_conn_to_session(xdr, args, &hdr);
        encode_nops(&hdr);
 }
 
@@ -5613,7 +5613,7 @@ static int decode_bind_conn_to_session(struct xdr_stream *xdr,
 
        status = decode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION);
        if (!status)
-               status = decode_sessionid(xdr, &res->session->sess_id);
+               status = decode_sessionid(xdr, &res->sessionid);
        if (unlikely(status))
                return status;
 
@@ -5641,12 +5641,10 @@ static int decode_create_session(struct xdr_stream *xdr,
 {
        __be32 *p;
        int status;
-       struct nfs_client *clp = res->client;
-       struct nfs4_session *session = clp->cl_session;
 
        status = decode_op_hdr(xdr, OP_CREATE_SESSION);
        if (!status)
-               status = decode_sessionid(xdr, &session->sess_id);
+               status = decode_sessionid(xdr, &res->sessionid);
        if (unlikely(status))
                return status;
 
@@ -5654,13 +5652,13 @@ static int decode_create_session(struct xdr_stream *xdr,
        p = xdr_inline_decode(xdr, 8);
        if (unlikely(!p))
                goto out_overflow;
-       clp->cl_seqid = be32_to_cpup(p++);
-       session->flags = be32_to_cpup(p);
+       res->seqid = be32_to_cpup(p++);
+       res->flags = be32_to_cpup(p);
 
        /* Channel attributes */
-       status = decode_chan_attrs(xdr, &session->fc_attrs);
+       status = decode_chan_attrs(xdr, &res->fc_attrs);
        if (!status)
-               status = decode_chan_attrs(xdr, &session->bc_attrs);
+               status = decode_chan_attrs(xdr, &res->bc_attrs);
        return status;
 out_overflow:
        print_overflow_msg(__func__, xdr);
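/*
 * Illustrative sketch, not part of the patch above: the BIND_CONN_TO_SESSION
 * arguments go on the wire as the 16-byte session id followed by two 32-bit
 * big-endian words, the requested direction and the use_conn_in_rdma_mode
 * flag -- which is what the encoder above now takes from the args struct
 * instead of hard-coding.  Simplified user-space stand-in, not the kernel's
 * xdr_stream API.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* htonl() */

#define SESSIONID_LEN 16

static size_t encode_bcts(uint8_t *buf, const uint8_t *sessionid,
			  uint32_t dir, int use_rdma)
{
	uint32_t word;
	size_t off = 0;

	memcpy(buf + off, sessionid, SESSIONID_LEN);	/* fixed-length opaque */
	off += SESSIONID_LEN;
	word = htonl(dir);
	memcpy(buf + off, &word, 4);
	off += 4;
	word = htonl(use_rdma ? 1 : 0);
	memcpy(buf + off, &word, 4);
	off += 4;
	return off;
}

int main(void)
{
	uint8_t sid[SESSIONID_LEN] = { 0 };
	uint8_t buf[SESSIONID_LEN + 8];
	size_t n = encode_bcts(buf, sid, 0x3 /* fore-or-both */, 0);

	printf("encoded %zu bytes\n", n);	/* 24 */
	return 0;
}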
index 797cd6253adf74d809510151080d1d06439f378d..635f0865671cf38b27eea4a49261c9405a4a4731 100644 (file)
@@ -344,6 +344,10 @@ void nfs4_pnfs_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds,
 struct nfs4_pnfs_ds_addr *nfs4_decode_mp_ds_addr(struct net *net,
                                                 struct xdr_stream *xdr,
                                                 gfp_t gfp_flags);
+void pnfs_layout_mark_request_commit(struct nfs_page *req,
+                                    struct pnfs_layout_segment *lseg,
+                                    struct nfs_commit_info *cinfo,
+                                    u32 ds_commit_idx);
 
 static inline bool nfs_have_layout(struct inode *inode)
 {
index fdc4f6562bb7efc65179f970053dfec6a9c38705..54e36b38fb5f89310287635e0838601ad07cf34a 100644 (file)
@@ -838,3 +838,33 @@ out_err:
        return NULL;
 }
 EXPORT_SYMBOL_GPL(nfs4_decode_mp_ds_addr);
+
+void
+pnfs_layout_mark_request_commit(struct nfs_page *req,
+                               struct pnfs_layout_segment *lseg,
+                               struct nfs_commit_info *cinfo,
+                               u32 ds_commit_idx)
+{
+       struct list_head *list;
+       struct pnfs_commit_bucket *buckets;
+
+       spin_lock(cinfo->lock);
+       buckets = cinfo->ds->buckets;
+       list = &buckets[ds_commit_idx].written;
+       if (list_empty(list)) {
+               /* Non-empty buckets hold a reference on the lseg.  That ref
+                * is normally transferred to the COMMIT call and released
+                * there.  It could also be released if the last req is pulled
+                * off due to a rewrite, in which case it will be done in
+                * pnfs_common_clear_request_commit
+                */
+               WARN_ON_ONCE(buckets[ds_commit_idx].wlseg != NULL);
+               buckets[ds_commit_idx].wlseg = pnfs_get_lseg(lseg);
+       }
+       set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
+       cinfo->ds->nwritten++;
+       spin_unlock(cinfo->lock);
+
+       nfs_request_add_commit_list(req, list, cinfo);
+}
+EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
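/*
 * Illustrative sketch, not part of the patch above: the helper takes a
 * reference on the layout segment only when a commit bucket goes from empty
 * to non-empty; that single reference is later handed to the COMMIT call (or
 * dropped when the bucket is emptied again), as the comment in the function
 * explains.  Simplified user-space model of the "one reference per non-empty
 * bucket" rule, not the kernel's pnfs code.
 */
#include <assert.h>
#include <stdio.h>

struct lseg { int refcount; };

struct bucket {
	int nreqs;		/* requests queued in this bucket  */
	struct lseg *wlseg;	/* reference held while nreqs > 0  */
};

static void bucket_add(struct bucket *b, struct lseg *lseg)
{
	if (b->nreqs == 0) {		/* empty -> non-empty: take the ref */
		assert(b->wlseg == NULL);
		lseg->refcount++;
		b->wlseg = lseg;
	}
	b->nreqs++;
}

static void bucket_drain(struct bucket *b)
{
	b->nreqs = 0;
	if (b->wlseg) {			/* non-empty -> empty: drop the ref */
		b->wlseg->refcount--;
		b->wlseg = NULL;
	}
}

int main(void)
{
	struct lseg l = { .refcount = 1 };
	struct bucket b = { 0, NULL };

	bucket_add(&b, &l);
	bucket_add(&b, &l);			/* second add: no extra reference */
	printf("refcount=%d\n", l.refcount);	/* 2 */
	bucket_drain(&b);
	printf("refcount=%d\n", l.refcount);	/* 1 */
	return 0;
}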
index 88a6d2196ece3bf5ce7a94027dd96e8f25bc9792..595d81e354d18950a21615862b134ca8993f4c6e 100644 (file)
@@ -789,13 +789,8 @@ nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
        nfs_list_add_request(req, dst);
        cinfo->mds->ncommit++;
        spin_unlock(cinfo->lock);
-       if (!cinfo->dreq) {
-               inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-               inc_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-                            BDI_RECLAIMABLE);
-               __mark_inode_dirty(req->wb_context->dentry->d_inode,
-                                  I_DIRTY_DATASYNC);
-       }
+       if (!cinfo->dreq)
+               nfs_mark_page_unstable(req->wb_page);
 }
 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
 
@@ -1605,11 +1600,8 @@ void nfs_retry_commit(struct list_head *page_list,
                req = nfs_list_entry(page_list->next);
                nfs_list_remove_request(req);
                nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
-               if (!cinfo->dreq) {
-                       dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
-                       dec_bdi_stat(inode_to_bdi(page_file_mapping(req->wb_page)->host),
-                                    BDI_RECLAIMABLE);
-               }
+               if (!cinfo->dreq)
+                       nfs_clear_page_commit(req->wb_page);
                nfs_unlock_and_release_request(req);
        }
 }
index cc6a76072009262475c656307e454bb7ad6e44a1..1c307f02baa89e79f0a6d2889631d3ef50ed0a48 100644 (file)
@@ -583,7 +583,7 @@ nfs4_reset_recoverydir(char *recdir)
        if (status)
                return status;
        status = -ENOTDIR;
-       if (S_ISDIR(path.dentry->d_inode->i_mode)) {
+       if (d_is_dir(path.dentry)) {
                strcpy(user_recovery_dirname, recdir);
                status = 0;
        }
@@ -1426,7 +1426,7 @@ nfsd4_client_tracking_init(struct net *net)
        nn->client_tracking_ops = &nfsd4_legacy_tracking_ops;
        status = kern_path(nfs4_recoverydir(), LOOKUP_FOLLOW, &path);
        if (!status) {
-               status = S_ISDIR(path.dentry->d_inode->i_mode);
+               status = d_is_dir(path.dentry);
                path_put(&path);
                if (status)
                        goto do_init;
index 965b478d50fc40a97a85243e478d9e47e3c38a51..e9fa966fc37fe5415f9fba50b61b37d459172303 100644 (file)
@@ -114,8 +114,8 @@ static inline __be32 check_pseudo_root(struct svc_rqst *rqstp,
         * We're exposing only the directories and symlinks that have to be
         * traversed on the way to real exports:
         */
-       if (unlikely(!S_ISDIR(dentry->d_inode->i_mode) &&
-                    !S_ISLNK(dentry->d_inode->i_mode)))
+       if (unlikely(!d_is_dir(dentry) &&
+                    !d_is_symlink(dentry)))
                return nfserr_stale;
        /*
         * A pseudoroot export gives permission to access only one
@@ -259,7 +259,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
                goto out;
        }
 
-       if (S_ISDIR(dentry->d_inode->i_mode) &&
+       if (d_is_dir(dentry) &&
                        (dentry->d_flags & DCACHE_DISCONNECTED)) {
                printk("nfsd: find_fh_dentry returned a DISCONNECTED directory: %pd2\n",
                                dentry);
@@ -414,7 +414,7 @@ static inline void _fh_update_old(struct dentry *dentry,
 {
        fh->ofh_ino = ino_t_to_u32(dentry->d_inode->i_ino);
        fh->ofh_generation = dentry->d_inode->i_generation;
-       if (S_ISDIR(dentry->d_inode->i_mode) ||
+       if (d_is_dir(dentry) ||
            (exp->ex_flags & NFSEXP_NOSUBTREECHECK))
                fh->ofh_dirino = 0;
 }
index 5685c679dd93d4371626de7d6107a9ac66a98093..36852658242943051f1a1cac7d4b6a8c1945f84c 100644 (file)
@@ -615,9 +615,9 @@ nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *suppor
        export = fhp->fh_export;
        dentry = fhp->fh_dentry;
 
-       if (S_ISREG(dentry->d_inode->i_mode))
+       if (d_is_reg(dentry))
                map = nfs3_regaccess;
-       else if (S_ISDIR(dentry->d_inode->i_mode))
+       else if (d_is_dir(dentry))
                map = nfs3_diraccess;
        else
                map = nfs3_anyaccess;
@@ -1402,7 +1402,7 @@ do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
 
                switch (createmode) {
                case NFS3_CREATE_UNCHECKED:
-                       if (! S_ISREG(dchild->d_inode->i_mode))
+                       if (! d_is_reg(dchild))
                                goto out;
                        else if (truncp) {
                                /* in nfsv4, we need to treat this case a little
@@ -1615,7 +1615,7 @@ nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
        if (err)
                goto out;
        err = nfserr_isdir;
-       if (S_ISDIR(tfhp->fh_dentry->d_inode->i_mode))
+       if (d_is_dir(tfhp->fh_dentry))
                goto out;
        err = nfserr_perm;
        if (!len)
index 51ceb81072847441135303dd752f57e6a97e2646..9a66ff79ff2781d1c7992dbd3d4ec42a82008f59 100644 (file)
@@ -115,8 +115,8 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
                return false;
 
        /* sorry, fanotify only gives a damn about files and dirs */
-       if (!S_ISREG(path->dentry->d_inode->i_mode) &&
-           !S_ISDIR(path->dentry->d_inode->i_mode))
+       if (!d_is_reg(path->dentry) &&
+           !d_can_lookup(path->dentry))
                return false;
 
        if (inode_mark && vfsmnt_mark) {
@@ -139,7 +139,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
                BUG();
        }
 
-       if (S_ISDIR(path->dentry->d_inode->i_mode) &&
+       if (d_is_dir(path->dentry) &&
            !(marks_mask & FS_ISDIR & ~marks_ignored_mask))
                return false;
 
index ea10a87191072339b1352c01f0261da054701a9b..24f640441bd90977a079aac782768025c68f3712 100644 (file)
@@ -191,7 +191,6 @@ int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
                ovl_set_timestamps(upperdentry, stat);
 
        return err;
-
 }
 
 static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
@@ -385,7 +384,7 @@ int ovl_copy_up(struct dentry *dentry)
                struct kstat stat;
                enum ovl_path_type type = ovl_path_type(dentry);
 
-               if (type != OVL_PATH_LOWER)
+               if (OVL_TYPE_UPPER(type))
                        break;
 
                next = dget(dentry);
@@ -394,7 +393,7 @@ int ovl_copy_up(struct dentry *dentry)
                        parent = dget_parent(next);
 
                        type = ovl_path_type(parent);
-                       if (type != OVL_PATH_LOWER)
+                       if (OVL_TYPE_UPPER(type))
                                break;
 
                        dput(next);
index 8ffc4b980f1b68641c17a7bfb67979f205c658b5..d139405d2bfad7cfd94c735913ecebf221def5b5 100644 (file)
@@ -19,7 +19,7 @@ void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
        int err;
 
        dget(wdentry);
-       if (S_ISDIR(wdentry->d_inode->i_mode))
+       if (d_is_dir(wdentry))
                err = ovl_do_rmdir(wdir, wdentry);
        else
                err = ovl_do_unlink(wdir, wdentry);
@@ -118,14 +118,14 @@ int ovl_create_real(struct inode *dir, struct dentry *newdentry,
 
 static int ovl_set_opaque(struct dentry *upperdentry)
 {
-       return ovl_do_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0);
+       return ovl_do_setxattr(upperdentry, OVL_XATTR_OPAQUE, "y", 1, 0);
 }
 
 static void ovl_remove_opaque(struct dentry *upperdentry)
 {
        int err;
 
-       err = ovl_do_removexattr(upperdentry, ovl_opaque_xattr);
+       err = ovl_do_removexattr(upperdentry, OVL_XATTR_OPAQUE);
        if (err) {
                pr_warn("overlayfs: failed to remove opaque from '%s' (%i)\n",
                        upperdentry->d_name.name, err);
@@ -152,7 +152,7 @@ static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
         * correct link count.  nlink=1 seems to pacify 'find' and
         * other utilities.
         */
-       if (type == OVL_PATH_MERGE)
+       if (OVL_TYPE_MERGE(type))
                stat->nlink = 1;
 
        return 0;
@@ -506,7 +506,7 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
        struct dentry *opaquedir = NULL;
        int err;
 
-       if (is_dir) {
+       if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
                opaquedir = ovl_check_empty_and_clear(dentry);
                err = PTR_ERR(opaquedir);
                if (IS_ERR(opaquedir))
@@ -630,7 +630,7 @@ static int ovl_do_remove(struct dentry *dentry, bool is_dir)
                goto out_drop_write;
 
        type = ovl_path_type(dentry);
-       if (type == OVL_PATH_PURE_UPPER) {
+       if (OVL_TYPE_PURE_UPPER(type)) {
                err = ovl_remove_upper(dentry, is_dir);
        } else {
                const struct cred *old_cred;
@@ -693,7 +693,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
        bool new_create = false;
        bool cleanup_whiteout = false;
        bool overwrite = !(flags & RENAME_EXCHANGE);
-       bool is_dir = S_ISDIR(old->d_inode->i_mode);
+       bool is_dir = d_is_dir(old);
        bool new_is_dir = false;
        struct dentry *opaquedir = NULL;
        const struct cred *old_cred = NULL;
@@ -712,7 +712,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
        /* Don't copy up directory trees */
        old_type = ovl_path_type(old);
        err = -EXDEV;
-       if ((old_type == OVL_PATH_LOWER || old_type == OVL_PATH_MERGE) && is_dir)
+       if (OVL_TYPE_MERGE_OR_LOWER(old_type) && is_dir)
                goto out;
 
        if (new->d_inode) {
@@ -720,30 +720,30 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
                if (err)
                        goto out;
 
-               if (S_ISDIR(new->d_inode->i_mode))
+               if (d_is_dir(new))
                        new_is_dir = true;
 
                new_type = ovl_path_type(new);
                err = -EXDEV;
-               if (!overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir)
+               if (!overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir)
                        goto out;
 
                err = 0;
-               if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) {
+               if (!OVL_TYPE_UPPER(new_type) && !OVL_TYPE_UPPER(old_type)) {
                        if (ovl_dentry_lower(old)->d_inode ==
                            ovl_dentry_lower(new)->d_inode)
                                goto out;
                }
-               if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) {
+               if (OVL_TYPE_UPPER(new_type) && OVL_TYPE_UPPER(old_type)) {
                        if (ovl_dentry_upper(old)->d_inode ==
                            ovl_dentry_upper(new)->d_inode)
                                goto out;
                }
        } else {
                if (ovl_dentry_is_opaque(new))
-                       new_type = OVL_PATH_UPPER;
+                       new_type = __OVL_PATH_UPPER;
                else
-                       new_type = OVL_PATH_PURE_UPPER;
+                       new_type = __OVL_PATH_UPPER | __OVL_PATH_PURE;
        }
 
        err = ovl_want_write(old);
@@ -763,8 +763,8 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
                        goto out_drop_write;
        }
 
-       old_opaque = old_type != OVL_PATH_PURE_UPPER;
-       new_opaque = new_type != OVL_PATH_PURE_UPPER;
+       old_opaque = !OVL_TYPE_PURE_UPPER(old_type);
+       new_opaque = !OVL_TYPE_PURE_UPPER(new_type);
 
        if (old_opaque || new_opaque) {
                err = -ENOMEM;
@@ -787,7 +787,7 @@ static int ovl_rename2(struct inode *olddir, struct dentry *old,
                old_cred = override_creds(override_cred);
        }
 
-       if (overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) {
+       if (overwrite && OVL_TYPE_MERGE_OR_LOWER(new_type) && new_is_dir) {
                opaquedir = ovl_check_empty_and_clear(new);
                err = PTR_ERR(opaquedir);
                if (IS_ERR(opaquedir)) {
index 07d74b24913bdee757377d103427883fb7d12e70..04f1248846877d019625c861b474702177b38ae5 100644 (file)
@@ -205,7 +205,7 @@ static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
 
 static bool ovl_is_private_xattr(const char *name)
 {
-       return strncmp(name, "trusted.overlay.", 14) == 0;
+       return strncmp(name, OVL_XATTR_PRE_NAME, OVL_XATTR_PRE_LEN) == 0;
 }
 
 int ovl_setxattr(struct dentry *dentry, const char *name,
@@ -238,7 +238,10 @@ out:
 static bool ovl_need_xattr_filter(struct dentry *dentry,
                                  enum ovl_path_type type)
 {
-       return type == OVL_PATH_UPPER && S_ISDIR(dentry->d_inode->i_mode);
+       if ((type & (__OVL_PATH_PURE | __OVL_PATH_UPPER)) == __OVL_PATH_UPPER)
+               return S_ISDIR(dentry->d_inode->i_mode);
+       else
+               return false;
 }
 
 ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
@@ -299,7 +302,7 @@ int ovl_removexattr(struct dentry *dentry, const char *name)
        if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
                goto out_drop_write;
 
-       if (type == OVL_PATH_LOWER) {
+       if (!OVL_TYPE_UPPER(type)) {
                err = vfs_getxattr(realpath.dentry, name, NULL, 0);
                if (err < 0)
                        goto out_drop_write;
@@ -321,7 +324,7 @@ out:
 static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
                                  struct dentry *realdentry)
 {
-       if (type != OVL_PATH_LOWER)
+       if (OVL_TYPE_UPPER(type))
                return false;
 
        if (special_file(realdentry->d_inode->i_mode))
@@ -430,5 +433,4 @@ struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
        }
 
        return inode;
-
 }
index 814bed33dd078c00ade8ed534ae19e969530ccdb..17ac5afc9ffbce150d03e352fd7aaa99f101aa0c 100644 (file)
 struct ovl_entry;
 
 enum ovl_path_type {
-       OVL_PATH_PURE_UPPER,
-       OVL_PATH_UPPER,
-       OVL_PATH_MERGE,
-       OVL_PATH_LOWER,
+       __OVL_PATH_PURE         = (1 << 0),
+       __OVL_PATH_UPPER        = (1 << 1),
+       __OVL_PATH_MERGE        = (1 << 2),
 };
 
-extern const char *ovl_opaque_xattr;
+#define OVL_TYPE_UPPER(type)   ((type) & __OVL_PATH_UPPER)
+#define OVL_TYPE_MERGE(type)   ((type) & __OVL_PATH_MERGE)
+#define OVL_TYPE_PURE_UPPER(type) ((type) & __OVL_PATH_PURE)
+#define OVL_TYPE_MERGE_OR_LOWER(type) \
+       (OVL_TYPE_MERGE(type) || !OVL_TYPE_UPPER(type))
+
+#define OVL_XATTR_PRE_NAME "trusted.overlay."
+#define OVL_XATTR_PRE_LEN  16
+#define OVL_XATTR_OPAQUE   OVL_XATTR_PRE_NAME"opaque"
 
 static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
 {
@@ -130,6 +137,7 @@ void ovl_dentry_version_inc(struct dentry *dentry);
 void ovl_path_upper(struct dentry *dentry, struct path *path);
 void ovl_path_lower(struct dentry *dentry, struct path *path);
 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
 struct dentry *ovl_dentry_upper(struct dentry *dentry);
 struct dentry *ovl_dentry_lower(struct dentry *dentry);
 struct dentry *ovl_dentry_real(struct dentry *dentry);
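/*
 * Illustrative sketch, not part of the patch above: ovl_path_type() now
 * returns a bit mask instead of an enum of mutually exclusive states, so
 * callers can ask independent questions ("is there an upper?", "is it
 * merged?", "is it pure upper?") with the OVL_TYPE_* macros.  Standalone
 * copy of the flag logic for demonstration only.
 */
#include <stdio.h>

#define PATH_PURE  (1 << 0)
#define PATH_UPPER (1 << 1)
#define PATH_MERGE (1 << 2)

#define TYPE_UPPER(t)          ((t) & PATH_UPPER)
#define TYPE_MERGE(t)          ((t) & PATH_MERGE)
#define TYPE_PURE_UPPER(t)     ((t) & PATH_PURE)
#define TYPE_MERGE_OR_LOWER(t) (TYPE_MERGE(t) || !TYPE_UPPER(t))

int main(void)
{
	unsigned int lower_only = 0;				/* no upper layer       */
	unsigned int pure_upper = PATH_UPPER | PATH_PURE;	/* upper, nothing below */
	unsigned int merged_dir = PATH_UPPER | PATH_MERGE;	/* upper + lower merged */

	printf("lower_only: merge_or_lower=%d\n", !!TYPE_MERGE_OR_LOWER(lower_only)); /* 1 */
	printf("pure_upper: merge_or_lower=%d\n", !!TYPE_MERGE_OR_LOWER(pure_upper)); /* 0 */
	printf("merged_dir: pure_upper=%d\n",     !!TYPE_PURE_UPPER(merged_dir));     /* 0 */
	return 0;
}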
index c0205990a9f54d03f77909555e368efc5a90bd6f..907870e81a72e36f4c5abb29fd34e23b4307efc5 100644 (file)
@@ -24,7 +24,6 @@ struct ovl_cache_entry {
        struct list_head l_node;
        struct rb_node node;
        bool is_whiteout;
-       bool is_cursor;
        char name[];
 };
 
@@ -40,6 +39,7 @@ struct ovl_readdir_data {
        struct rb_root root;
        struct list_head *list;
        struct list_head middle;
+       struct dentry *dir;
        int count;
        int err;
 };
@@ -48,7 +48,7 @@ struct ovl_dir_file {
        bool is_real;
        bool is_upper;
        struct ovl_dir_cache *cache;
-       struct ovl_cache_entry cursor;
+       struct list_head *cursor;
        struct file *realfile;
        struct file *upperfile;
 };
@@ -79,23 +79,49 @@ static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
        return NULL;
 }
 
-static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
+static struct ovl_cache_entry *ovl_cache_entry_new(struct dentry *dir,
+                                                  const char *name, int len,
                                                   u64 ino, unsigned int d_type)
 {
        struct ovl_cache_entry *p;
        size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);
 
        p = kmalloc(size, GFP_KERNEL);
-       if (p) {
-               memcpy(p->name, name, len);
-               p->name[len] = '\0';
-               p->len = len;
-               p->type = d_type;
-               p->ino = ino;
-               p->is_whiteout = false;
-               p->is_cursor = false;
-       }
+       if (!p)
+               return NULL;
+
+       memcpy(p->name, name, len);
+       p->name[len] = '\0';
+       p->len = len;
+       p->type = d_type;
+       p->ino = ino;
+       p->is_whiteout = false;
+
+       if (d_type == DT_CHR) {
+               struct dentry *dentry;
+               const struct cred *old_cred;
+               struct cred *override_cred;
+
+               override_cred = prepare_creds();
+               if (!override_cred) {
+                       kfree(p);
+                       return NULL;
+               }
+
+               /*
+                * CAP_DAC_OVERRIDE for lookup
+                */
+               cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
+               old_cred = override_creds(override_cred);
 
+
+               dentry = lookup_one_len(name, dir, len);
+               if (!IS_ERR(dentry)) {
+                       p->is_whiteout = ovl_is_whiteout(dentry);
+                       dput(dentry);
+               }
+               revert_creds(old_cred);
+               put_cred(override_cred);
+       }
        return p;
 }
 
@@ -122,7 +148,7 @@ static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
                        return 0;
        }
 
-       p = ovl_cache_entry_new(name, len, ino, d_type);
+       p = ovl_cache_entry_new(rdd->dir, name, len, ino, d_type);
        if (p == NULL)
                return -ENOMEM;
 
@@ -143,7 +169,7 @@ static int ovl_fill_lower(struct ovl_readdir_data *rdd,
        if (p) {
                list_move_tail(&p->l_node, &rdd->middle);
        } else {
-               p = ovl_cache_entry_new(name, namelen, ino, d_type);
+               p = ovl_cache_entry_new(rdd->dir, name, namelen, ino, d_type);
                if (p == NULL)
                        rdd->err = -ENOMEM;
                else
@@ -168,7 +194,6 @@ static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
 {
        struct ovl_dir_cache *cache = od->cache;
 
-       list_del_init(&od->cursor.l_node);
        WARN_ON(cache->refcount <= 0);
        cache->refcount--;
        if (!cache->refcount) {
@@ -204,6 +229,7 @@ static inline int ovl_dir_read(struct path *realpath,
        if (IS_ERR(realfile))
                return PTR_ERR(realfile);
 
+       rdd->dir = realpath->dentry;
        rdd->ctx.pos = 0;
        do {
                rdd->count = 0;
@@ -227,108 +253,58 @@ static void ovl_dir_reset(struct file *file)
        if (cache && ovl_dentry_version_get(dentry) != cache->version) {
                ovl_cache_put(od, dentry);
                od->cache = NULL;
+               od->cursor = NULL;
        }
-       WARN_ON(!od->is_real && type != OVL_PATH_MERGE);
-       if (od->is_real && type == OVL_PATH_MERGE)
+       WARN_ON(!od->is_real && !OVL_TYPE_MERGE(type));
+       if (od->is_real && OVL_TYPE_MERGE(type))
                od->is_real = false;
 }
 
-static int ovl_dir_mark_whiteouts(struct dentry *dir,
-                                 struct ovl_readdir_data *rdd)
-{
-       struct ovl_cache_entry *p;
-       struct dentry *dentry;
-       const struct cred *old_cred;
-       struct cred *override_cred;
-
-       override_cred = prepare_creds();
-       if (!override_cred) {
-               ovl_cache_free(rdd->list);
-               return -ENOMEM;
-       }
-
-       /*
-        * CAP_DAC_OVERRIDE for lookup
-        */
-       cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
-       old_cred = override_creds(override_cred);
-
-       mutex_lock(&dir->d_inode->i_mutex);
-       list_for_each_entry(p, rdd->list, l_node) {
-               if (p->is_cursor)
-                       continue;
-
-               if (p->type != DT_CHR)
-                       continue;
-
-               dentry = lookup_one_len(p->name, dir, p->len);
-               if (IS_ERR(dentry))
-                       continue;
-
-               p->is_whiteout = ovl_is_whiteout(dentry);
-               dput(dentry);
-       }
-       mutex_unlock(&dir->d_inode->i_mutex);
-
-       revert_creds(old_cred);
-       put_cred(override_cred);
-
-       return 0;
-}
-
 static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
 {
        int err;
-       struct path lowerpath;
-       struct path upperpath;
+       struct path realpath;
        struct ovl_readdir_data rdd = {
                .ctx.actor = ovl_fill_merge,
                .list = list,
                .root = RB_ROOT,
                .is_merge = false,
        };
+       int idx, next;
 
-       ovl_path_lower(dentry, &lowerpath);
-       ovl_path_upper(dentry, &upperpath);
+       for (idx = 0; idx != -1; idx = next) {
+               next = ovl_path_next(idx, dentry, &realpath);
 
-       if (upperpath.dentry) {
-               err = ovl_dir_read(&upperpath, &rdd);
-               if (err)
-                       goto out;
-
-               if (lowerpath.dentry) {
-                       err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd);
+               if (next != -1) {
+                       err = ovl_dir_read(&realpath, &rdd);
                        if (err)
-                               goto out;
+                               break;
+               } else {
+                       /*
+                        * Insert lowest layer entries before upper ones, this
+                        * allows offsets to be reasonably constant
+                        */
+                       list_add(&rdd.middle, rdd.list);
+                       rdd.is_merge = true;
+                       err = ovl_dir_read(&realpath, &rdd);
+                       list_del(&rdd.middle);
                }
        }
-       if (lowerpath.dentry) {
-               /*
-                * Insert lowerpath entries before upperpath ones, this allows
-                * offsets to be reasonably constant
-                */
-               list_add(&rdd.middle, rdd.list);
-               rdd.is_merge = true;
-               err = ovl_dir_read(&lowerpath, &rdd);
-               list_del(&rdd.middle);
-       }
-out:
        return err;
 }
 
 static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
 {
-       struct ovl_cache_entry *p;
+       struct list_head *p;
        loff_t off = 0;
 
-       list_for_each_entry(p, &od->cache->entries, l_node) {
-               if (p->is_cursor)
-                       continue;
+       list_for_each(p, &od->cache->entries) {
                if (off >= pos)
                        break;
                off++;
        }
-       list_move_tail(&od->cursor.l_node, &p->l_node);
+       /* Cursor is safe since the cache is stable */
+       od->cursor = p;
 }
 
 static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
@@ -367,6 +343,7 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
 {
        struct ovl_dir_file *od = file->private_data;
        struct dentry *dentry = file->f_path.dentry;
+       struct ovl_cache_entry *p;
 
        if (!ctx->pos)
                ovl_dir_reset(file);
@@ -385,19 +362,13 @@ static int ovl_iterate(struct file *file, struct dir_context *ctx)
                ovl_seek_cursor(od, ctx->pos);
        }
 
-       while (od->cursor.l_node.next != &od->cache->entries) {
-               struct ovl_cache_entry *p;
-
-               p = list_entry(od->cursor.l_node.next, struct ovl_cache_entry, l_node);
-               /* Skip cursors */
-               if (!p->is_cursor) {
-                       if (!p->is_whiteout) {
-                               if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
-                                       break;
-                       }
-                       ctx->pos++;
-               }
-               list_move(&od->cursor.l_node, &p->l_node);
+       while (od->cursor != &od->cache->entries) {
+               p = list_entry(od->cursor, struct ovl_cache_entry, l_node);
+               if (!p->is_whiteout)
+                       if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
+                               break;
+               od->cursor = p->l_node.next;
+               ctx->pos++;
        }
        return 0;
 }
@@ -452,7 +423,7 @@ static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
        /*
         * Need to check if we started out being a lower dir, but got copied up
         */
-       if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) {
+       if (!od->is_upper && OVL_TYPE_UPPER(ovl_path_type(dentry))) {
                struct inode *inode = file_inode(file);
 
                realfile = lockless_dereference(od->upperfile);
@@ -516,11 +487,9 @@ static int ovl_dir_open(struct inode *inode, struct file *file)
                kfree(od);
                return PTR_ERR(realfile);
        }
-       INIT_LIST_HEAD(&od->cursor.l_node);
        od->realfile = realfile;
-       od->is_real = (type != OVL_PATH_MERGE);
-       od->is_upper = (type != OVL_PATH_LOWER);
-       od->cursor.is_cursor = true;
+       od->is_real = !OVL_TYPE_MERGE(type);
+       od->is_upper = OVL_TYPE_UPPER(type);
        file->private_data = od;
 
        return 0;
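/*
 * Illustrative sketch, not part of the patch above: the readdir cursor is
 * now a plain pointer into the (stable, refcounted) cache list rather than a
 * dummy entry spliced into the list, so iteration no longer has to skip
 * cursor nodes.  Minimal user-space model of seeking and resuming with an
 * external cursor; the kernel uses struct list_head, this uses a simple
 * singly linked list.
 */
#include <stdio.h>

struct entry { const char *name; struct entry *next; };

int main(void)
{
	struct entry c = { "c", NULL }, b = { "b", &c }, a = { "a", &b };
	struct entry *head = &a;
	struct entry *cursor = head;	/* external cursor, not an in-list dummy */
	int pos = 0, want = 1;		/* resume after the first entry          */

	/* seek: advance the cursor without modifying the list itself */
	while (cursor && pos < want) {
		cursor = cursor->next;
		pos++;
	}
	/* emit the remaining entries from the cursor position */
	for (; cursor; cursor = cursor->next)
		printf("%s\n", cursor->name);	/* b, c */
	return 0;
}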
index f16d318b71f8bbe4e77f8a3214e616101848e49c..b90952f528b1cdf7414e49b5613e439d6b89fb7a 100644 (file)
@@ -35,7 +35,8 @@ struct ovl_config {
 /* private information held for overlayfs's superblock */
 struct ovl_fs {
        struct vfsmount *upper_mnt;
-       struct vfsmount *lower_mnt;
+       unsigned numlower;
+       struct vfsmount **lower_mnt;
        struct dentry *workdir;
        long lower_namelen;
        /* pathnames of lower and upper dirs, for show_options */
@@ -47,7 +48,6 @@ struct ovl_dir_cache;
 /* private information held for every overlayfs dentry */
 struct ovl_entry {
        struct dentry *__upperdentry;
-       struct dentry *lowerdentry;
        struct ovl_dir_cache *cache;
        union {
                struct {
@@ -56,30 +56,36 @@ struct ovl_entry {
                };
                struct rcu_head rcu;
        };
+       unsigned numlower;
+       struct path lowerstack[];
 };
 
-const char *ovl_opaque_xattr = "trusted.overlay.opaque";
+#define OVL_MAX_STACK 500
 
+static struct dentry *__ovl_dentry_lower(struct ovl_entry *oe)
+{
+       return oe->numlower ? oe->lowerstack[0].dentry : NULL;
+}
 
 enum ovl_path_type ovl_path_type(struct dentry *dentry)
 {
        struct ovl_entry *oe = dentry->d_fsdata;
+       enum ovl_path_type type = 0;
 
        if (oe->__upperdentry) {
-               if (oe->lowerdentry) {
+               type = __OVL_PATH_UPPER;
+
+               if (oe->numlower) {
                        if (S_ISDIR(dentry->d_inode->i_mode))
-                               return OVL_PATH_MERGE;
-                       else
-                               return OVL_PATH_UPPER;
-               } else {
-                       if (oe->opaque)
-                               return OVL_PATH_UPPER;
-                       else
-                               return OVL_PATH_PURE_UPPER;
+                               type |= __OVL_PATH_MERGE;
+               } else if (!oe->opaque) {
+                       type |= __OVL_PATH_PURE;
                }
        } else {
-               return OVL_PATH_LOWER;
+               if (oe->numlower > 1)
+                       type |= __OVL_PATH_MERGE;
        }
+       return type;
 }
 
 static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
@@ -98,10 +104,9 @@ void ovl_path_upper(struct dentry *dentry, struct path *path)
 
 enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
 {
-
        enum ovl_path_type type = ovl_path_type(dentry);
 
-       if (type == OVL_PATH_LOWER)
+       if (!OVL_TYPE_UPPER(type))
                ovl_path_lower(dentry, path);
        else
                ovl_path_upper(dentry, path);
@@ -120,7 +125,7 @@ struct dentry *ovl_dentry_lower(struct dentry *dentry)
 {
        struct ovl_entry *oe = dentry->d_fsdata;
 
-       return oe->lowerdentry;
+       return __ovl_dentry_lower(oe);
 }
 
 struct dentry *ovl_dentry_real(struct dentry *dentry)
@@ -130,7 +135,7 @@ struct dentry *ovl_dentry_real(struct dentry *dentry)
 
        realdentry = ovl_upperdentry_dereference(oe);
        if (!realdentry)
-               realdentry = oe->lowerdentry;
+               realdentry = __ovl_dentry_lower(oe);
 
        return realdentry;
 }
@@ -143,7 +148,7 @@ struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper)
        if (realdentry) {
                *is_upper = true;
        } else {
-               realdentry = oe->lowerdentry;
+               realdentry = __ovl_dentry_lower(oe);
                *is_upper = false;
        }
        return realdentry;
@@ -165,11 +170,9 @@ void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache)
 
 void ovl_path_lower(struct dentry *dentry, struct path *path)
 {
-       struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
        struct ovl_entry *oe = dentry->d_fsdata;
 
-       path->mnt = ofs->lower_mnt;
-       path->dentry = oe->lowerdentry;
+       *path = oe->numlower ? oe->lowerstack[0] : (struct path) { NULL, NULL };
 }
 
 int ovl_want_write(struct dentry *dentry)
@@ -249,7 +252,7 @@ static bool ovl_is_opaquedir(struct dentry *dentry)
        if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr)
                return false;
 
-       res = inode->i_op->getxattr(dentry, ovl_opaque_xattr, &val, 1);
+       res = inode->i_op->getxattr(dentry, OVL_XATTR_OPAQUE, &val, 1);
        if (res == 1 && val == 'y')
                return true;
 
@@ -261,8 +264,11 @@ static void ovl_dentry_release(struct dentry *dentry)
        struct ovl_entry *oe = dentry->d_fsdata;
 
        if (oe) {
+               unsigned int i;
+
                dput(oe->__upperdentry);
-               dput(oe->lowerdentry);
+               for (i = 0; i < oe->numlower; i++)
+                       dput(oe->lowerstack[i].dentry);
                kfree_rcu(oe, rcu);
        }
 }
@@ -271,9 +277,15 @@ static const struct dentry_operations ovl_dentry_operations = {
        .d_release = ovl_dentry_release,
 };
 
-static struct ovl_entry *ovl_alloc_entry(void)
+static struct ovl_entry *ovl_alloc_entry(unsigned int numlower)
 {
-       return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL);
+       size_t size = offsetof(struct ovl_entry, lowerstack[numlower]);
+       struct ovl_entry *oe = kzalloc(size, GFP_KERNEL);
+
+       if (oe)
+               oe->numlower = numlower;
+
+       return oe;
 }
 
 static inline struct dentry *ovl_lookup_real(struct dentry *dir,
@@ -295,82 +307,154 @@ static inline struct dentry *ovl_lookup_real(struct dentry *dir,
        return dentry;
 }
 
+/*
+ * Returns next layer in stack starting from top.
+ * Returns -1 if this is the last layer.
+ */
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
+{
+       struct ovl_entry *oe = dentry->d_fsdata;
+
+       BUG_ON(idx < 0);
+       if (idx == 0) {
+               ovl_path_upper(dentry, path);
+               if (path->dentry)
+                       return oe->numlower ? 1 : -1;
+               idx++;
+       }
+       BUG_ON(idx > oe->numlower);
+       *path = oe->lowerstack[idx - 1];
+
+       return (idx < oe->numlower) ? idx + 1 : -1;
+}
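/*
 * Illustrative sketch, not part of the patch above: ovl_path_next() walks the
 * layer stack top-down, returning the index to pass in on the next call and
 * -1 once the last layer has been handed out, which lets callers (such as the
 * merged readdir) loop with "for (idx = 0; idx != -1; idx = next)".
 * Simplified user-space version over an array of strings.
 */
#include <stdio.h>

static const char *layers[] = { "upper", "lower0", "lower1" };
static const int nlayers = 3;

/* Hand out layers[idx]; return the next index, or -1 after the last one. */
static int path_next(int idx, const char **out)
{
	*out = layers[idx];
	return (idx + 1 < nlayers) ? idx + 1 : -1;
}

int main(void)
{
	const char *p;
	int idx, next;

	for (idx = 0; idx != -1; idx = next) {
		next = path_next(idx, &p);
		printf("%s\n", p);	/* upper, lower0, lower1 */
	}
	return 0;
}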
+
 struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                          unsigned int flags)
 {
        struct ovl_entry *oe;
-       struct dentry *upperdir;
-       struct dentry *lowerdir;
-       struct dentry *upperdentry = NULL;
-       struct dentry *lowerdentry = NULL;
+       struct ovl_entry *poe = dentry->d_parent->d_fsdata;
+       struct path *stack = NULL;
+       struct dentry *upperdir, *upperdentry = NULL;
+       unsigned int ctr = 0;
        struct inode *inode = NULL;
+       bool upperopaque = false;
+       struct dentry *this, *prev = NULL;
+       unsigned int i;
        int err;
 
-       err = -ENOMEM;
-       oe = ovl_alloc_entry();
-       if (!oe)
-               goto out;
-
-       upperdir = ovl_dentry_upper(dentry->d_parent);
-       lowerdir = ovl_dentry_lower(dentry->d_parent);
-
+       upperdir = ovl_upperdentry_dereference(poe);
        if (upperdir) {
-               upperdentry = ovl_lookup_real(upperdir, &dentry->d_name);
-               err = PTR_ERR(upperdentry);
-               if (IS_ERR(upperdentry))
-                       goto out_put_dir;
-
-               if (lowerdir && upperdentry) {
-                       if (ovl_is_whiteout(upperdentry)) {
-                               dput(upperdentry);
-                               upperdentry = NULL;
-                               oe->opaque = true;
-                       } else if (ovl_is_opaquedir(upperdentry)) {
-                               oe->opaque = true;
+               this = ovl_lookup_real(upperdir, &dentry->d_name);
+               err = PTR_ERR(this);
+               if (IS_ERR(this))
+                       goto out;
+
+               if (this) {
+                       if (ovl_is_whiteout(this)) {
+                               dput(this);
+                               this = NULL;
+                               upperopaque = true;
+                       } else if (poe->numlower && ovl_is_opaquedir(this)) {
+                               upperopaque = true;
                        }
                }
+               upperdentry = prev = this;
        }
-       if (lowerdir && !oe->opaque) {
-               lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name);
-               err = PTR_ERR(lowerdentry);
-               if (IS_ERR(lowerdentry))
-                       goto out_dput_upper;
+
+       if (!upperopaque && poe->numlower) {
+               err = -ENOMEM;
+               stack = kcalloc(poe->numlower, sizeof(struct path), GFP_KERNEL);
+               if (!stack)
+                       goto out_put_upper;
        }
 
-       if (lowerdentry && upperdentry &&
-           (!S_ISDIR(upperdentry->d_inode->i_mode) ||
-            !S_ISDIR(lowerdentry->d_inode->i_mode))) {
-               dput(lowerdentry);
-               lowerdentry = NULL;
-               oe->opaque = true;
+       for (i = 0; !upperopaque && i < poe->numlower; i++) {
+               bool opaque = false;
+               struct path lowerpath = poe->lowerstack[i];
+
+               this = ovl_lookup_real(lowerpath.dentry, &dentry->d_name);
+               err = PTR_ERR(this);
+               if (IS_ERR(this)) {
+                       /*
+                        * If it's positive, then treat ENAMETOOLONG as ENOENT.
+                        */
+                       if (err == -ENAMETOOLONG && (upperdentry || ctr))
+                               continue;
+                       goto out_put;
+               }
+               if (!this)
+                       continue;
+               if (ovl_is_whiteout(this)) {
+                       dput(this);
+                       break;
+               }
+               /*
+                * Only makes sense to check opaque dir if this is not the
+                * lowermost layer.
+                */
+               if (i < poe->numlower - 1 && ovl_is_opaquedir(this))
+                       opaque = true;
+
+               if (prev && (!S_ISDIR(prev->d_inode->i_mode) ||
+                            !S_ISDIR(this->d_inode->i_mode))) {
+                       /*
+                        * FIXME: check for upper-opaqueness maybe better done
+                        * in remove code.
+                        */
+                       if (prev == upperdentry)
+                               upperopaque = true;
+                       dput(this);
+                       break;
+               }
+               /*
+                * If this is a non-directory then stop here.
+                */
+               if (!S_ISDIR(this->d_inode->i_mode))
+                       opaque = true;
+
+               stack[ctr].dentry = this;
+               stack[ctr].mnt = lowerpath.mnt;
+               ctr++;
+               prev = this;
+               if (opaque)
+                       break;
        }
 
-       if (lowerdentry || upperdentry) {
+       oe = ovl_alloc_entry(ctr);
+       err = -ENOMEM;
+       if (!oe)
+               goto out_put;
+
+       if (upperdentry || ctr) {
                struct dentry *realdentry;
 
-               realdentry = upperdentry ? upperdentry : lowerdentry;
+               realdentry = upperdentry ? upperdentry : stack[0].dentry;
+
                err = -ENOMEM;
                inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode,
                                      oe);
                if (!inode)
-                       goto out_dput;
+                       goto out_free_oe;
                ovl_copyattr(realdentry->d_inode, inode);
        }
 
+       oe->opaque = upperopaque;
        oe->__upperdentry = upperdentry;
-       oe->lowerdentry = lowerdentry;
-
+       memcpy(oe->lowerstack, stack, sizeof(struct path) * ctr);
+       kfree(stack);
        dentry->d_fsdata = oe;
        d_add(dentry, inode);
 
        return NULL;
 
-out_dput:
-       dput(lowerdentry);
-out_dput_upper:
-       dput(upperdentry);
-out_put_dir:
+out_free_oe:
        kfree(oe);
+out_put:
+       for (i = 0; i < ctr; i++)
+               dput(stack[i].dentry);
+       kfree(stack);
+out_put_upper:
+       dput(upperdentry);
 out:
        return ERR_PTR(err);
 }
@@ -383,10 +467,12 @@ struct file *ovl_path_open(struct path *path, int flags)
 static void ovl_put_super(struct super_block *sb)
 {
        struct ovl_fs *ufs = sb->s_fs_info;
+       unsigned i;
 
        dput(ufs->workdir);
        mntput(ufs->upper_mnt);
-       mntput(ufs->lower_mnt);
+       for (i = 0; i < ufs->numlower; i++)
+               mntput(ufs->lower_mnt[i]);
 
        kfree(ufs->config.lowerdir);
        kfree(ufs->config.upperdir);
@@ -400,7 +486,7 @@ static void ovl_put_super(struct super_block *sb)
  * @buf: The struct kstatfs to fill in with stats
  *
  * Get the filesystem statistics.  As writes always target the upper layer
- * filesystem pass the statfs to the same filesystem.
+ * filesystem pass the statfs to the upper filesystem (if it exists)
  */
 static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
@@ -409,7 +495,7 @@ static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
        struct path path;
        int err;
 
-       ovl_path_upper(root_dentry, &path);
+       ovl_path_real(root_dentry, &path);
 
        err = vfs_statfs(&path, buf);
        if (!err) {
@@ -432,8 +518,21 @@ static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
        struct ovl_fs *ufs = sb->s_fs_info;
 
        seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir);
-       seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
-       seq_printf(m, ",workdir=%s", ufs->config.workdir);
+       if (ufs->config.upperdir) {
+               seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
+               seq_printf(m, ",workdir=%s", ufs->config.workdir);
+       }
+       return 0;
+}
+
+static int ovl_remount(struct super_block *sb, int *flags, char *data)
+{
+       struct ovl_fs *ufs = sb->s_fs_info;
+
+       if (!(*flags & MS_RDONLY) &&
+           (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)))
+               return -EROFS;
+
        return 0;
 }
 
@@ -441,6 +540,7 @@ static const struct super_operations ovl_super_operations = {
        .put_super      = ovl_put_super,
        .statfs         = ovl_statfs,
        .show_options   = ovl_show_options,
+       .remount_fs     = ovl_remount,
 };
 
 enum {
@@ -585,24 +685,6 @@ static void ovl_unescape(char *s)
        }
 }
 
-static int ovl_mount_dir(const char *name, struct path *path)
-{
-       int err;
-       char *tmp = kstrdup(name, GFP_KERNEL);
-
-       if (!tmp)
-               return -ENOMEM;
-
-       ovl_unescape(tmp);
-       err = kern_path(tmp, LOOKUP_FOLLOW, path);
-       if (err) {
-               pr_err("overlayfs: failed to resolve '%s': %i\n", tmp, err);
-               err = -EINVAL;
-       }
-       kfree(tmp);
-       return err;
-}
-
 static bool ovl_is_allowed_fs_type(struct dentry *root)
 {
        const struct dentry_operations *dop = root->d_op;
@@ -622,6 +704,75 @@ static bool ovl_is_allowed_fs_type(struct dentry *root)
        return true;
 }
 
+static int ovl_mount_dir_noesc(const char *name, struct path *path)
+{
+       int err = -EINVAL;
+
+       if (!*name) {
+               pr_err("overlayfs: empty lowerdir\n");
+               goto out;
+       }
+       err = kern_path(name, LOOKUP_FOLLOW, path);
+       if (err) {
+               pr_err("overlayfs: failed to resolve '%s': %i\n", name, err);
+               goto out;
+       }
+       err = -EINVAL;
+       if (!ovl_is_allowed_fs_type(path->dentry)) {
+               pr_err("overlayfs: filesystem on '%s' not supported\n", name);
+               goto out_put;
+       }
+       if (!S_ISDIR(path->dentry->d_inode->i_mode)) {
+               pr_err("overlayfs: '%s' not a directory\n", name);
+               goto out_put;
+       }
+       return 0;
+
+out_put:
+       path_put(path);
+out:
+       return err;
+}
+
+static int ovl_mount_dir(const char *name, struct path *path)
+{
+       int err = -ENOMEM;
+       char *tmp = kstrdup(name, GFP_KERNEL);
+
+       if (tmp) {
+               ovl_unescape(tmp);
+               err = ovl_mount_dir_noesc(tmp, path);
+               kfree(tmp);
+       }
+       return err;
+}
+
+static int ovl_lower_dir(const char *name, struct path *path, long *namelen,
+                        int *stack_depth)
+{
+       int err;
+       struct kstatfs statfs;
+
+       err = ovl_mount_dir_noesc(name, path);
+       if (err)
+               goto out;
+
+       err = vfs_statfs(path, &statfs);
+       if (err) {
+               pr_err("overlayfs: statfs failed on '%s'\n", name);
+               goto out_put;
+       }
+       *namelen = max(*namelen, statfs.f_namelen);
+       *stack_depth = max(*stack_depth, path->mnt->mnt_sb->s_stack_depth);
+
+       return 0;
+
+out_put:
+       path_put(path);
+out:
+       return err;
+}
+
 /* Workdir should not be subdir of upperdir and vice versa */
 static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir)
 {
@@ -634,16 +785,39 @@ static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir)
        return ok;
 }
 
+static unsigned int ovl_split_lowerdirs(char *str)
+{
+       unsigned int ctr = 1;
+       char *s, *d;
+
+       for (s = d = str;; s++, d++) {
+               if (*s == '\\') {
+                       s++;
+               } else if (*s == ':') {
+                       *d = '\0';
+                       ctr++;
+                       continue;
+               }
+               *d = *s;
+               if (!*s)
+                       break;
+       }
+       return ctr;
+}
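
The splitter above rewrites the option string in place: every unescaped ':' becomes a '\0' layer separator, "\:" and "\\" are unescaped, and the return value is the number of lower layers. For illustration only (not part of this patch), a minimal user-space sketch of the same logic:

/*
 * Standalone demo of the escape-aware lowerdir splitting used by
 * ovl_split_lowerdirs(): an unescaped ':' separates layers, "\:" keeps
 * a literal colon inside a path component.
 */
#include <stdio.h>
#include <string.h>

static unsigned int split_lowerdirs(char *str)
{
	unsigned int ctr = 1;
	char *s, *d;

	for (s = d = str;; s++, d++) {
		if (*s == '\\') {
			s++;
		} else if (*s == ':') {
			*d = '\0';
			ctr++;
			continue;
		}
		*d = *s;
		if (!*s)
			break;
	}
	return ctr;
}

int main(void)
{
	char buf[] = "/lower1:/dir\\:with\\:colons:/lower3";
	unsigned int n = split_lowerdirs(buf);
	char *p = buf;

	printf("%u lower layers\n", n);	/* prints: 3 lower layers */
	while (n--) {
		printf("  %s\n", p);	/* /lower1, /dir:with:colons, /lower3 */
		p = strchr(p, '\0') + 1;
	}
	return 0;
}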
+
 static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 {
-       struct path lowerpath;
-       struct path upperpath;
-       struct path workpath;
-       struct inode *root_inode;
+       struct path upperpath = { NULL, NULL };
+       struct path workpath = { NULL, NULL };
        struct dentry *root_dentry;
        struct ovl_entry *oe;
        struct ovl_fs *ufs;
-       struct kstatfs statfs;
+       struct path *stack = NULL;
+       char *lowertmp;
+       char *lower;
+       unsigned int numlower;
+       unsigned int stacklen = 0;
+       unsigned int i;
        int err;
 
        err = -ENOMEM;
@@ -655,123 +829,135 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
        if (err)
                goto out_free_config;
 
-       /* FIXME: workdir is not needed for a R/O mount */
        err = -EINVAL;
-       if (!ufs->config.upperdir || !ufs->config.lowerdir ||
-           !ufs->config.workdir) {
-               pr_err("overlayfs: missing upperdir or lowerdir or workdir\n");
+       if (!ufs->config.lowerdir) {
+               pr_err("overlayfs: missing 'lowerdir'\n");
                goto out_free_config;
        }
 
-       err = -ENOMEM;
-       oe = ovl_alloc_entry();
-       if (oe == NULL)
-               goto out_free_config;
-
-       err = ovl_mount_dir(ufs->config.upperdir, &upperpath);
-       if (err)
-               goto out_free_oe;
-
-       err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath);
-       if (err)
-               goto out_put_upperpath;
+       sb->s_stack_depth = 0;
+       if (ufs->config.upperdir) {
+               /* FIXME: workdir is not needed for a R/O mount */
+               if (!ufs->config.workdir) {
+                       pr_err("overlayfs: missing 'workdir'\n");
+                       goto out_free_config;
+               }
 
-       err = ovl_mount_dir(ufs->config.workdir, &workpath);
-       if (err)
-               goto out_put_lowerpath;
+               err = ovl_mount_dir(ufs->config.upperdir, &upperpath);
+               if (err)
+                       goto out_free_config;
 
-       err = -EINVAL;
-       if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) ||
-           !S_ISDIR(lowerpath.dentry->d_inode->i_mode) ||
-           !S_ISDIR(workpath.dentry->d_inode->i_mode)) {
-               pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n");
-               goto out_put_workpath;
-       }
+               err = ovl_mount_dir(ufs->config.workdir, &workpath);
+               if (err)
+                       goto out_put_upperpath;
 
-       if (upperpath.mnt != workpath.mnt) {
-               pr_err("overlayfs: workdir and upperdir must reside under the same mount\n");
-               goto out_put_workpath;
-       }
-       if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) {
-               pr_err("overlayfs: workdir and upperdir must be separate subtrees\n");
-               goto out_put_workpath;
+               err = -EINVAL;
+               if (upperpath.mnt != workpath.mnt) {
+                       pr_err("overlayfs: workdir and upperdir must reside under the same mount\n");
+                       goto out_put_workpath;
+               }
+               if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) {
+                       pr_err("overlayfs: workdir and upperdir must be separate subtrees\n");
+                       goto out_put_workpath;
+               }
+               sb->s_stack_depth = upperpath.mnt->mnt_sb->s_stack_depth;
        }
-
-       if (!ovl_is_allowed_fs_type(upperpath.dentry)) {
-               pr_err("overlayfs: filesystem of upperdir is not supported\n");
+       err = -ENOMEM;
+       lowertmp = kstrdup(ufs->config.lowerdir, GFP_KERNEL);
+       if (!lowertmp)
                goto out_put_workpath;
-       }
 
-       if (!ovl_is_allowed_fs_type(lowerpath.dentry)) {
-               pr_err("overlayfs: filesystem of lowerdir is not supported\n");
-               goto out_put_workpath;
-       }
+       err = -EINVAL;
+       stacklen = ovl_split_lowerdirs(lowertmp);
+       if (stacklen > OVL_MAX_STACK)
+               goto out_free_lowertmp;
+
+       stack = kcalloc(stacklen, sizeof(struct path), GFP_KERNEL);
+       if (!stack)
+               goto out_free_lowertmp;
+
+       lower = lowertmp;
+       for (numlower = 0; numlower < stacklen; numlower++) {
+               err = ovl_lower_dir(lower, &stack[numlower],
+                                   &ufs->lower_namelen, &sb->s_stack_depth);
+               if (err)
+                       goto out_put_lowerpath;
 
-       err = vfs_statfs(&lowerpath, &statfs);
-       if (err) {
-               pr_err("overlayfs: statfs failed on lowerpath\n");
-               goto out_put_workpath;
+               lower = strchr(lower, '\0') + 1;
        }
-       ufs->lower_namelen = statfs.f_namelen;
-
-       sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth,
-                               lowerpath.mnt->mnt_sb->s_stack_depth) + 1;
 
        err = -EINVAL;
+       sb->s_stack_depth++;
        if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
                pr_err("overlayfs: maximum fs stacking depth exceeded\n");
-               goto out_put_workpath;
+               goto out_put_lowerpath;
        }
 
-       ufs->upper_mnt = clone_private_mount(&upperpath);
-       err = PTR_ERR(ufs->upper_mnt);
-       if (IS_ERR(ufs->upper_mnt)) {
-               pr_err("overlayfs: failed to clone upperpath\n");
-               goto out_put_workpath;
-       }
+       if (ufs->config.upperdir) {
+               ufs->upper_mnt = clone_private_mount(&upperpath);
+               err = PTR_ERR(ufs->upper_mnt);
+               if (IS_ERR(ufs->upper_mnt)) {
+                       pr_err("overlayfs: failed to clone upperpath\n");
+                       goto out_put_lowerpath;
+               }
 
-       ufs->lower_mnt = clone_private_mount(&lowerpath);
-       err = PTR_ERR(ufs->lower_mnt);
-       if (IS_ERR(ufs->lower_mnt)) {
-               pr_err("overlayfs: failed to clone lowerpath\n");
-               goto out_put_upper_mnt;
+               ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
+               err = PTR_ERR(ufs->workdir);
+               if (IS_ERR(ufs->workdir)) {
+                       pr_err("overlayfs: failed to create directory %s/%s\n",
+                              ufs->config.workdir, OVL_WORKDIR_NAME);
+                       goto out_put_upper_mnt;
+               }
        }
 
-       ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
-       err = PTR_ERR(ufs->workdir);
-       if (IS_ERR(ufs->workdir)) {
-               pr_err("overlayfs: failed to create directory %s/%s\n",
-                      ufs->config.workdir, OVL_WORKDIR_NAME);
-               goto out_put_lower_mnt;
-       }
+       err = -ENOMEM;
+       ufs->lower_mnt = kcalloc(numlower, sizeof(struct vfsmount *), GFP_KERNEL);
+       if (ufs->lower_mnt == NULL)
+               goto out_put_workdir;
+       for (i = 0; i < numlower; i++) {
+               struct vfsmount *mnt = clone_private_mount(&stack[i]);
 
-       /*
-        * Make lower_mnt R/O.  That way fchmod/fchown on lower file
-        * will fail instead of modifying lower fs.
-        */
-       ufs->lower_mnt->mnt_flags |= MNT_READONLY;
+               err = PTR_ERR(mnt);
+               if (IS_ERR(mnt)) {
+                       pr_err("overlayfs: failed to clone lowerpath\n");
+                       goto out_put_lower_mnt;
+               }
+               /*
+                * Make lower_mnt R/O.  That way fchmod/fchown on lower file
+                * will fail instead of modifying lower fs.
+                */
+               mnt->mnt_flags |= MNT_READONLY;
+
+               ufs->lower_mnt[ufs->numlower] = mnt;
+               ufs->numlower++;
+       }
 
-       /* If the upper fs is r/o, we mark overlayfs r/o too */
-       if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)
+       /* If the upper fs is r/o or nonexistent, we mark overlayfs r/o too */
+       if (!ufs->upper_mnt || (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY))
                sb->s_flags |= MS_RDONLY;
 
        sb->s_d_op = &ovl_dentry_operations;
 
        err = -ENOMEM;
-       root_inode = ovl_new_inode(sb, S_IFDIR, oe);
-       if (!root_inode)
-               goto out_put_workdir;
+       oe = ovl_alloc_entry(numlower);
+       if (!oe)
+               goto out_put_lower_mnt;
 
-       root_dentry = d_make_root(root_inode);
+       root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, oe));
        if (!root_dentry)
-               goto out_put_workdir;
+               goto out_free_oe;
 
        mntput(upperpath.mnt);
-       mntput(lowerpath.mnt);
+       for (i = 0; i < numlower; i++)
+               mntput(stack[i].mnt);
        path_put(&workpath);
+       kfree(lowertmp);
 
        oe->__upperdentry = upperpath.dentry;
-       oe->lowerdentry = lowerpath.dentry;
+       for (i = 0; i < numlower; i++) {
+               oe->lowerstack[i].dentry = stack[i].dentry;
+               oe->lowerstack[i].mnt = ufs->lower_mnt[i];
+       }
 
        root_dentry->d_fsdata = oe;
 
@@ -782,20 +968,26 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
 
        return 0;
 
+out_free_oe:
+       kfree(oe);
+out_put_lower_mnt:
+       for (i = 0; i < ufs->numlower; i++)
+               mntput(ufs->lower_mnt[i]);
+       kfree(ufs->lower_mnt);
 out_put_workdir:
        dput(ufs->workdir);
-out_put_lower_mnt:
-       mntput(ufs->lower_mnt);
 out_put_upper_mnt:
        mntput(ufs->upper_mnt);
+out_put_lowerpath:
+       for (i = 0; i < numlower; i++)
+               path_put(&stack[i]);
+       kfree(stack);
+out_free_lowertmp:
+       kfree(lowertmp);
 out_put_workpath:
        path_put(&workpath);
-out_put_lowerpath:
-       path_put(&lowerpath);
 out_put_upperpath:
        path_put(&upperpath);
-out_free_oe:
-       kfree(oe);
 out_free_config:
        kfree(ufs->config.lowerdir);
        kfree(ufs->config.upperdir);
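
With these changes the filesystem accepts several colon-separated lower layers, and upperdir= and workdir= become optional: a lowerdir-only mount comes up read-only, and ovl_remount() above refuses a read-write remount in that case. A hypothetical user-space sketch of the resulting option syntax (paths are made up; requires CAP_SYS_ADMIN):

/* Illustrative only: mount a two-layer overlay via mount(2). */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* Lower layers are separated by ':'; escape a literal colon as "\:". */
	const char *opts =
		"lowerdir=/ro/layer2:/ro/layer1,upperdir=/rw/upper,workdir=/rw/work";

	if (mount("overlay", "/merged", "overlay", 0, opts)) {
		perror("mount");
		return 1;
	}
	return 0;
}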
index 0855f772cd41599d6c1d1091e7da616d32cccf53..3a48bb789c9f3e4eb227214f72798b90c52af5dc 100644 (file)
@@ -564,13 +564,11 @@ posix_acl_create(struct inode *dir, umode_t *mode,
 
        *acl = posix_acl_clone(p, GFP_NOFS);
        if (!*acl)
-               return -ENOMEM;
+               goto no_mem;
 
        ret = posix_acl_create_masq(*acl, mode);
-       if (ret < 0) {
-               posix_acl_release(*acl);
-               return -ENOMEM;
-       }
+       if (ret < 0)
+               goto no_mem_clone;
 
        if (ret == 0) {
                posix_acl_release(*acl);
@@ -591,6 +589,12 @@ no_acl:
        *default_acl = NULL;
        *acl = NULL;
        return 0;
+
+no_mem_clone:
+       posix_acl_release(*acl);
+no_mem:
+       posix_acl_release(p);
+       return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(posix_acl_create);
 
@@ -772,7 +776,7 @@ posix_acl_xattr_get(struct dentry *dentry, const char *name,
 
        if (!IS_POSIXACL(dentry->d_inode))
                return -EOPNOTSUPP;
-       if (S_ISLNK(dentry->d_inode->i_mode))
+       if (d_is_symlink(dentry))
                return -EOPNOTSUPP;
 
        acl = get_acl(dentry->d_inode, type);
@@ -832,7 +836,7 @@ posix_acl_xattr_list(struct dentry *dentry, char *list, size_t list_size,
 
        if (!IS_POSIXACL(dentry->d_inode))
                return -EOPNOTSUPP;
-       if (S_ISLNK(dentry->d_inode->i_mode))
+       if (d_is_symlink(dentry))
                return -EOPNOTSUPP;
 
        if (type == ACL_TYPE_ACCESS)
index 3309f59d421ba6e5b806651dcc1d926143470953..be65b208213518f267d9fe16b5652b5021700cf5 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/mount.h>
 #include <linux/init.h>
 #include <linux/idr.h>
-#include <linux/namei.h>
 #include <linux/bitops.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
@@ -223,17 +222,6 @@ void proc_free_inum(unsigned int inum)
        spin_unlock_irqrestore(&proc_inum_lock, flags);
 }
 
-static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       nd_set_link(nd, __PDE_DATA(dentry->d_inode));
-       return NULL;
-}
-
-static const struct inode_operations proc_link_inode_operations = {
-       .readlink       = generic_readlink,
-       .follow_link    = proc_follow_link,
-};
-
 /*
  * Don't create negative dentries here, return -ENOENT by hand
  * instead.
index 13a50a32652dc868ab94084a1e07fb4a809e43da..7697b6621cfd5b13051318ba15920646942ed528 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/slab.h>
 #include <linux/mount.h>
 #include <linux/magic.h>
+#include <linux/namei.h>
 
 #include <asm/uaccess.h>
 
@@ -393,6 +394,26 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
 };
 #endif
 
+static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
+{
+       struct proc_dir_entry *pde = PDE(dentry->d_inode);
+       if (unlikely(!use_pde(pde)))
+               return ERR_PTR(-EINVAL);
+       nd_set_link(nd, pde->data);
+       return pde;
+}
+
+static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+{
+       unuse_pde(p);
+}
+
+const struct inode_operations proc_link_inode_operations = {
+       .readlink       = generic_readlink,
+       .follow_link    = proc_follow_link,
+       .put_link       = proc_put_link,
+};
+
 struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
 {
        struct inode *inode = new_inode_pseudo(sb);
index 6fcdba573e0fa2471e668e96217f366bed749050..c835b94c0cd3afec0bea4017ca8bacd63b32ff8e 100644 (file)
@@ -200,6 +200,7 @@ struct pde_opener {
        int closing;
        struct completion *c;
 };
+extern const struct inode_operations proc_link_inode_operations;
 
 extern const struct inode_operations proc_pid_link_inode_operations;
 
index 04b06146bae224f0f9177da58fee0c6fd6d3e747..4e781e697c90bce3b42f0e0097fe46965bbb5258 100644 (file)
@@ -266,7 +266,7 @@ static int reiserfs_for_each_xattr(struct inode *inode,
                for (i = 0; !err && i < buf.count && buf.dentries[i]; i++) {
                        struct dentry *dentry = buf.dentries[i];
 
-                       if (!S_ISDIR(dentry->d_inode->i_mode))
+                       if (!d_is_dir(dentry))
                                err = action(dentry, data);
 
                        dput(dentry);
@@ -322,7 +322,7 @@ static int delete_one_xattr(struct dentry *dentry, void *data)
        struct inode *dir = dentry->d_parent->d_inode;
 
        /* This is the xattr dir, handle specially. */
-       if (S_ISDIR(dentry->d_inode->i_mode))
+       if (d_is_dir(dentry))
                return xattr_rmdir(dir, dentry);
 
        return xattr_unlink(dir, dentry);
index 65a53efc1cf4a5d5ce5cb7c6eca39fa28b0d2f7f..2b7dc90ccdbb4ae1ceac7725967cec2a643e21c7 100644 (file)
@@ -71,7 +71,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
        if (!(sc->gfp_mask & __GFP_FS))
                return SHRINK_STOP;
 
-       if (!grab_super_passive(sb))
+       if (!trylock_super(sb))
                return SHRINK_STOP;
 
        if (sb->s_op->nr_cached_objects)
@@ -105,7 +105,7 @@ static unsigned long super_cache_scan(struct shrinker *shrink,
                freed += sb->s_op->free_cached_objects(sb, sc);
        }
 
-       drop_super(sb);
+       up_read(&sb->s_umount);
        return freed;
 }
 
@@ -118,7 +118,7 @@ static unsigned long super_cache_count(struct shrinker *shrink,
        sb = container_of(shrink, struct super_block, s_shrink);
 
        /*
-        * Don't call grab_super_passive as it is a potential
+        * Don't call trylock_super as it is a potential
         * scalability bottleneck. The counts could get updated
         * between super_cache_count and super_cache_scan anyway.
         * Call to super_cache_count with shrinker_rwsem held
@@ -348,35 +348,31 @@ static int grab_super(struct super_block *s) __releases(sb_lock)
 }
 
 /*
- *     grab_super_passive - acquire a passive reference
+ *     trylock_super - try to grab ->s_umount shared
  *     @sb: reference we are trying to grab
  *
- *     Tries to acquire a passive reference. This is used in places where we
+ *     Try to prevent fs shutdown.  This is used in places where we
  *     cannot take an active reference but we need to ensure that the
- *     superblock does not go away while we are working on it. It returns
- *     false if a reference was not gained, and returns true with the s_umount
- *     lock held in read mode if a reference is gained. On successful return,
- *     the caller must drop the s_umount lock and the passive reference when
- *     done.
+ *     filesystem is not shut down while we are working on it. It returns
+ *     false if we cannot acquire s_umount, or if we lose the race and the
+ *     filesystem has already begun shutting down; it returns true with the
+ *     s_umount lock held in read mode on success, in which case the caller
+ *     must drop the s_umount lock when done.
+ *
+ *     Note that unlike get_super() et al. this one does *not* bump ->s_count.
+ *     The reason why it's safe is that we are OK with doing trylock instead
+ *     of down_read().  There are a couple of places that are OK with that, but
+ *     it's very much not a general-purpose interface.
  */
-bool grab_super_passive(struct super_block *sb)
+bool trylock_super(struct super_block *sb)
 {
-       spin_lock(&sb_lock);
-       if (hlist_unhashed(&sb->s_instances)) {
-               spin_unlock(&sb_lock);
-               return false;
-       }
-
-       sb->s_count++;
-       spin_unlock(&sb_lock);
-
        if (down_read_trylock(&sb->s_umount)) {
-               if (sb->s_root && (sb->s_flags & MS_BORN))
+               if (!hlist_unhashed(&sb->s_instances) &&
+                   sb->s_root && (sb->s_flags & MS_BORN))
                        return true;
                up_read(&sb->s_umount);
        }
 
-       put_super(sb);
        return false;
 }
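
For reference, the intended caller pattern (a sketch modelled on super_cache_scan() above; the function name is made up): take the lock with trylock_super(), bail out if that fails, and drop only ->s_umount when done, since no ->s_count reference was taken.

/* Sketch only: how a shrinker-style caller is expected to use trylock_super(). */
static unsigned long example_scan(struct super_block *sb)
{
	unsigned long freed = 0;

	if (!trylock_super(sb))
		return SHRINK_STOP;	/* lock contended or fs already shutting down */

	/* ... work on sb; unmount cannot complete while ->s_umount is held ... */

	up_read(&sb->s_umount);
	return freed;
}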
 
index d61799949580a497ccae45883cb2f4c3f8e34495..df6828570e874ae423c48309f51a91d16ce949fd 100644 (file)
@@ -121,3 +121,4 @@ xfs-$(CONFIG_XFS_POSIX_ACL) += xfs_acl.o
 xfs-$(CONFIG_PROC_FS)          += xfs_stats.o
 xfs-$(CONFIG_SYSCTL)           += xfs_sysctl.o
 xfs-$(CONFIG_COMPAT)           += xfs_ioctl32.o
+xfs-$(CONFIG_NFSD_PNFS)                += xfs_pnfs.o
index 5eb4a14e0a0fdc53d45652a3f84b323f040aa2f1..b97359ba2648f12fca1bfa98e25be6efc719c8e3 100644 (file)
@@ -30,6 +30,7 @@
 #include "xfs_trace.h"
 #include "xfs_icache.h"
 #include "xfs_log.h"
+#include "xfs_pnfs.h"
 
 /*
  * Note that we only accept fileids which are long enough rather than allow
@@ -245,4 +246,9 @@ const struct export_operations xfs_export_operations = {
        .fh_to_parent           = xfs_fs_fh_to_parent,
        .get_parent             = xfs_fs_get_parent,
        .commit_metadata        = xfs_fs_nfs_commit_metadata,
+#ifdef CONFIG_NFSD_PNFS
+       .get_uuid               = xfs_fs_get_uuid,
+       .map_blocks             = xfs_fs_map_blocks,
+       .commit_blocks          = xfs_fs_commit_blocks,
+#endif
 };
index 1cdba95c78cb3e2475de29e0b6d88df3604e4cdf..ce615d12fb44cfae0d6bf344cbf0b3d2e4f43e1d 100644 (file)
@@ -36,6 +36,7 @@
 #include "xfs_trace.h"
 #include "xfs_log.h"
 #include "xfs_icache.h"
+#include "xfs_pnfs.h"
 
 #include <linux/aio.h>
 #include <linux/dcache.h>
@@ -554,6 +555,10 @@ restart:
        if (error)
                return error;
 
+       error = xfs_break_layouts(inode, iolock);
+       if (error)
+               return error;
+
        /*
         * If the offset is beyond the size of the file, we need to zero any
         * blocks that fall between the existing EOF and the start of this
@@ -822,6 +827,7 @@ xfs_file_fallocate(
        struct xfs_inode        *ip = XFS_I(inode);
        long                    error;
        enum xfs_prealloc_flags flags = 0;
+       uint                    iolock = XFS_IOLOCK_EXCL;
        loff_t                  new_size = 0;
 
        if (!S_ISREG(inode->i_mode))
@@ -830,7 +836,11 @@ xfs_file_fallocate(
                     FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE))
                return -EOPNOTSUPP;
 
-       xfs_ilock(ip, XFS_IOLOCK_EXCL);
+       xfs_ilock(ip, iolock);
+       error = xfs_break_layouts(inode, &iolock);
+       if (error)
+               goto out_unlock;
+
        if (mode & FALLOC_FL_PUNCH_HOLE) {
                error = xfs_free_file_space(ip, offset, len);
                if (error)
@@ -894,7 +904,7 @@ xfs_file_fallocate(
        }
 
 out_unlock:
-       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+       xfs_iunlock(ip, iolock);
        return error;
 }
 
index fba6532efba44d0d7114baa24e358716aeb3c922..74efe5b760dcc2907280db8e374afe6f5914d7b3 100644 (file)
@@ -602,6 +602,12 @@ xfs_growfs_data(
        if (!mutex_trylock(&mp->m_growlock))
                return -EWOULDBLOCK;
        error = xfs_growfs_data_private(mp, in);
+       /*
+        * Increment the generation unconditionally, as the error could be from
+        * updating the secondary superblocks, in which case the new size
+        * is live already.
+        */
+       mp->m_generation++;
        mutex_unlock(&mp->m_growlock);
        return error;
 }
index f7afb86c91487fc0a89c98578a28b3a1dfda83e0..ac4feae45eb308c39629f177c0b6620fae77fb69 100644 (file)
@@ -39,6 +39,7 @@
 #include "xfs_icache.h"
 #include "xfs_symlink.h"
 #include "xfs_trans.h"
+#include "xfs_pnfs.h"
 
 #include <linux/capability.h>
 #include <linux/dcache.h>
@@ -286,7 +287,7 @@ xfs_readlink_by_handle(
                return PTR_ERR(dentry);
 
        /* Restrict this handle operation to symlinks only. */
-       if (!S_ISLNK(dentry->d_inode->i_mode)) {
+       if (!d_is_symlink(dentry)) {
                error = -EINVAL;
                goto out_dput;
        }
@@ -608,6 +609,7 @@ xfs_ioc_space(
 {
        struct iattr            iattr;
        enum xfs_prealloc_flags flags = 0;
+       uint                    iolock = XFS_IOLOCK_EXCL;
        int                     error;
 
        /*
@@ -636,7 +638,10 @@ xfs_ioc_space(
        if (error)
                return error;
 
-       xfs_ilock(ip, XFS_IOLOCK_EXCL);
+       xfs_ilock(ip, iolock);
+       error = xfs_break_layouts(inode, &iolock);
+       if (error)
+               goto out_unlock;
 
        switch (bf->l_whence) {
        case 0: /*SEEK_SET*/
@@ -725,7 +730,7 @@ xfs_ioc_space(
        error = xfs_update_prealloc_flags(ip, flags);
 
 out_unlock:
-       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+       xfs_iunlock(ip, iolock);
        mnt_drop_write_file(filp);
        return error;
 }
index ce80eeb8faa472fd225571de4df196dd6d689693..d919ad7b16bf9acbe01c6531a9254b372426911a 100644 (file)
@@ -37,6 +37,7 @@
 #include "xfs_da_btree.h"
 #include "xfs_dir2.h"
 #include "xfs_trans_space.h"
+#include "xfs_pnfs.h"
 
 #include <linux/capability.h>
 #include <linux/xattr.h>
@@ -505,7 +506,7 @@ xfs_setattr_mode(
        inode->i_mode |= mode & ~S_IFMT;
 }
 
-static void
+void
 xfs_setattr_time(
        struct xfs_inode        *ip,
        struct iattr            *iattr)
@@ -979,9 +980,13 @@ xfs_vn_setattr(
        int                     error;
 
        if (iattr->ia_valid & ATTR_SIZE) {
-               xfs_ilock(ip, XFS_IOLOCK_EXCL);
-               error = xfs_setattr_size(ip, iattr);
-               xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+               uint            iolock = XFS_IOLOCK_EXCL;
+
+               xfs_ilock(ip, iolock);
+               error = xfs_break_layouts(dentry->d_inode, &iolock);
+               if (!error)
+                       error = xfs_setattr_size(ip, iattr);
+               xfs_iunlock(ip, iolock);
        } else {
                error = xfs_setattr_nonsize(ip, iattr, 0);
        }
index 1c34e4335920021d5829c5507be2f2c6c6bf60e1..ea7a98e9cb7048820bfa69c5bc294ccbe4102709 100644 (file)
@@ -32,6 +32,7 @@ extern void xfs_setup_inode(struct xfs_inode *);
  */
 #define XFS_ATTR_NOACL         0x01    /* Don't call posix_acl_chmod */
 
+extern void xfs_setattr_time(struct xfs_inode *ip, struct iattr *iattr);
 extern int xfs_setattr_nonsize(struct xfs_inode *ip, struct iattr *vap,
                               int flags);
 extern int xfs_setattr_size(struct xfs_inode *ip, struct iattr *vap);
index a5b2ff8226535d44443ce08369162f01e1d0cce2..0d8abd6364d97e613d98d4f827814c7eb7beb207 100644 (file)
@@ -174,6 +174,17 @@ typedef struct xfs_mount {
        struct workqueue_struct *m_reclaim_workqueue;
        struct workqueue_struct *m_log_workqueue;
        struct workqueue_struct *m_eofblocks_workqueue;
+
+       /*
+        * Generation of the filesystem layout.  This is incremented by each
+        * growfs, and used by the pNFS server to ensure the client updates
+        * its view of the block device once it gets a layout that might
+        * reference the newly added blocks.  Does not need to be persistent
+        * as long as we only allow file system size increments, but if we
+        * ever support shrinks it would have to be persisted in addition
+        * to various other kinds of pain inflicted on the pNFS server.
+        */
+       __uint32_t              m_generation;
 } xfs_mount_t;
 
 /*
diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c
new file mode 100644 (file)
index 0000000..4b33ef1
--- /dev/null
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2014 Christoph Hellwig.
+ */
+#include "xfs.h"
+#include "xfs_format.h"
+#include "xfs_log_format.h"
+#include "xfs_trans_resv.h"
+#include "xfs_sb.h"
+#include "xfs_mount.h"
+#include "xfs_inode.h"
+#include "xfs_trans.h"
+#include "xfs_log.h"
+#include "xfs_bmap.h"
+#include "xfs_bmap_util.h"
+#include "xfs_error.h"
+#include "xfs_iomap.h"
+#include "xfs_shared.h"
+#include "xfs_bit.h"
+#include "xfs_pnfs.h"
+
+/*
+ * Ensure that we do not have any outstanding pNFS layouts that can be used by
+ * clients to directly read from or write to this inode.  This must be called
+ * before every operation that can remove blocks from the extent map.
+ * Additionally we call it during the write operation, where we aren't concerned
+ * about exposing unallocated blocks but just want to provide basic
+ * synchronization between a local writer and pNFS clients.  mmap writes would
+ * also benefit from this sort of synchronization, but due to the tricky locking
+ * rules in the page fault path we don't bother.
+ */
+int
+xfs_break_layouts(
+       struct inode            *inode,
+       uint                    *iolock)
+{
+       struct xfs_inode        *ip = XFS_I(inode);
+       int                     error;
+
+       ASSERT(xfs_isilocked(ip, XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));
+
+       while ((error = break_layout(inode, false)) == -EWOULDBLOCK) {
+               xfs_iunlock(ip, *iolock);
+               error = break_layout(inode, true);
+               *iolock = XFS_IOLOCK_EXCL;
+               xfs_ilock(ip, *iolock);
+       }
+
+       return error;
+}
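
The callers converted elsewhere in this commit (xfs_file_fallocate(), xfs_ioc_space(), xfs_vn_setattr()) all follow the same shape; a condensed sketch with a hypothetical function name:

/*
 * Sketch of the caller pattern: xfs_break_layouts() may drop and
 * re-take the iolock (always re-acquiring it exclusive), so the lock
 * mode is passed by reference and the caller unlocks with the value
 * written back into iolock.
 */
static int example_break_then_modify(struct xfs_inode *ip, struct inode *inode)
{
	uint	iolock = XFS_IOLOCK_EXCL;
	int	error;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(inode, &iolock);
	if (error)
		goto out_unlock;

	/* ... now safe to punch holes, truncate, etc. ... */

out_unlock:
	xfs_iunlock(ip, iolock);
	return error;
}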
+
+/*
+ * Get a unique ID including its location so that the client can identify
+ * the exported device.
+ */
+int
+xfs_fs_get_uuid(
+       struct super_block      *sb,
+       u8                      *buf,
+       u32                     *len,
+       u64                     *offset)
+{
+       struct xfs_mount        *mp = XFS_M(sb);
+
+       printk_once(KERN_NOTICE
+"XFS (%s): using experimental pNFS feature, use at your own risk!\n",
+               mp->m_fsname);
+
+       if (*len < sizeof(uuid_t))
+               return -EINVAL;
+
+       memcpy(buf, &mp->m_sb.sb_uuid, sizeof(uuid_t));
+       *len = sizeof(uuid_t);
+       *offset = offsetof(struct xfs_dsb, sb_uuid);
+       return 0;
+}
+
+static void
+xfs_bmbt_to_iomap(
+       struct xfs_inode        *ip,
+       struct iomap            *iomap,
+       struct xfs_bmbt_irec    *imap)
+{
+       struct xfs_mount        *mp = ip->i_mount;
+
+       if (imap->br_startblock == HOLESTARTBLOCK) {
+               iomap->blkno = IOMAP_NULL_BLOCK;
+               iomap->type = IOMAP_HOLE;
+       } else if (imap->br_startblock == DELAYSTARTBLOCK) {
+               iomap->blkno = IOMAP_NULL_BLOCK;
+               iomap->type = IOMAP_DELALLOC;
+       } else {
+               iomap->blkno =
+                       XFS_FSB_TO_DADDR(ip->i_mount, imap->br_startblock);
+               if (imap->br_state == XFS_EXT_UNWRITTEN)
+                       iomap->type = IOMAP_UNWRITTEN;
+               else
+                       iomap->type = IOMAP_MAPPED;
+       }
+       iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
+       iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
+}
+
+/*
+ * Get a layout for the pNFS client.
+ */
+int
+xfs_fs_map_blocks(
+       struct inode            *inode,
+       loff_t                  offset,
+       u64                     length,
+       struct iomap            *iomap,
+       bool                    write,
+       u32                     *device_generation)
+{
+       struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_bmbt_irec    imap;
+       xfs_fileoff_t           offset_fsb, end_fsb;
+       loff_t                  limit;
+       int                     bmapi_flags = XFS_BMAPI_ENTIRE;
+       int                     nimaps = 1;
+       uint                    lock_flags;
+       int                     error = 0;
+
+       if (XFS_FORCED_SHUTDOWN(mp))
+               return -EIO;
+
+       /*
+        * We can't export inodes residing on the realtime device.  The realtime
+        * device doesn't have a UUID to identify it, so the client has no way
+        * to find it.
+        */
+       if (XFS_IS_REALTIME_INODE(ip))
+               return -ENXIO;
+
+       /*
+        * Lock out any other I/O before we flush and invalidate the pagecache,
+        * and then hand out a layout to the remote system.  This is very
+        * similar to direct I/O, except that the synchronization is much more
+        * complicated.  See the comment near xfs_break_layouts for a detailed
+        * explanation.
+        */
+       xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+       error = -EINVAL;
+       limit = mp->m_super->s_maxbytes;
+       if (!write)
+               limit = max(limit, round_up(i_size_read(inode),
+                                    inode->i_sb->s_blocksize));
+       if (offset > limit)
+               goto out_unlock;
+       if (offset > limit - length)
+               length = limit - offset;
+
+       error = filemap_write_and_wait(inode->i_mapping);
+       if (error)
+               goto out_unlock;
+       error = invalidate_inode_pages2(inode->i_mapping);
+       if (WARN_ON_ONCE(error))
+               goto out_unlock;
+
+       end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + length);
+       offset_fsb = XFS_B_TO_FSBT(mp, offset);
+
+       lock_flags = xfs_ilock_data_map_shared(ip);
+       error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb,
+                               &imap, &nimaps, bmapi_flags);
+       xfs_iunlock(ip, lock_flags);
+
+       if (error)
+               goto out_unlock;
+
+       if (write) {
+               enum xfs_prealloc_flags flags = 0;
+
+               ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
+
+               if (!nimaps || imap.br_startblock == HOLESTARTBLOCK) {
+                       error = xfs_iomap_write_direct(ip, offset, length,
+                                                      &imap, nimaps);
+                       if (error)
+                               goto out_unlock;
+
+                       /*
+                        * Ensure the next transaction is committed
+                        * synchronously so that the blocks allocated and
+                        * handed out to the client are guaranteed to be
+                        * present even after a server crash.
+                        */
+                       flags |= XFS_PREALLOC_SET | XFS_PREALLOC_SYNC;
+               }
+
+               error = xfs_update_prealloc_flags(ip, flags);
+               if (error)
+                       goto out_unlock;
+       }
+       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+
+       xfs_bmbt_to_iomap(ip, iomap, &imap);
+       *device_generation = mp->m_generation;
+       return error;
+out_unlock:
+       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+       return error;
+}
+
+/*
+ * Ensure the size update falls into a valid allocated block.
+ */
+static int
+xfs_pnfs_validate_isize(
+       struct xfs_inode        *ip,
+       xfs_off_t               isize)
+{
+       struct xfs_bmbt_irec    imap;
+       int                     nimaps = 1;
+       int                     error = 0;
+
+       xfs_ilock(ip, XFS_ILOCK_SHARED);
+       error = xfs_bmapi_read(ip, XFS_B_TO_FSBT(ip->i_mount, isize - 1), 1,
+                               &imap, &nimaps, 0);
+       xfs_iunlock(ip, XFS_ILOCK_SHARED);
+       if (error)
+               return error;
+
+       if (imap.br_startblock == HOLESTARTBLOCK ||
+           imap.br_startblock == DELAYSTARTBLOCK ||
+           imap.br_state == XFS_EXT_UNWRITTEN)
+               return -EIO;
+       return 0;
+}
+
+/*
+ * Make sure the blocks described by maps are stable on disk.  This includes
+ * converting any unwritten extents, flushing the disk cache and updating the
+ * time stamps.
+ *
+ * Note that we rely on the caller to always send us a timestamp update so that
+ * we always commit a transaction here.  If that stops being true we will have
+ * to manually flush the cache here similar to what the fsync code path does
+ * for datasyncs on files that have no dirty metadata.
+ */
+int
+xfs_fs_commit_blocks(
+       struct inode            *inode,
+       struct iomap            *maps,
+       int                     nr_maps,
+       struct iattr            *iattr)
+{
+       struct xfs_inode        *ip = XFS_I(inode);
+       struct xfs_mount        *mp = ip->i_mount;
+       struct xfs_trans        *tp;
+       bool                    update_isize = false;
+       int                     error, i;
+       loff_t                  size;
+
+       ASSERT(iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME));
+
+       xfs_ilock(ip, XFS_IOLOCK_EXCL);
+
+       size = i_size_read(inode);
+       if ((iattr->ia_valid & ATTR_SIZE) && iattr->ia_size > size) {
+               update_isize = true;
+               size = iattr->ia_size;
+       }
+
+       for (i = 0; i < nr_maps; i++) {
+               u64 start, length, end;
+
+               start = maps[i].offset;
+               if (start > size)
+                       continue;
+
+               end = start + maps[i].length;
+               if (end > size)
+                       end = size;
+
+               length = end - start;
+               if (!length)
+                       continue;
+
+               /*
+                * Make sure reads through the pagecache see the new data.
+                */
+               error = invalidate_inode_pages2_range(inode->i_mapping,
+                                       start >> PAGE_CACHE_SHIFT,
+                                       (end - 1) >> PAGE_CACHE_SHIFT);
+               WARN_ON_ONCE(error);
+
+               error = xfs_iomap_write_unwritten(ip, start, length);
+               if (error)
+                       goto out_drop_iolock;
+       }
+
+       if (update_isize) {
+               error = xfs_pnfs_validate_isize(ip, size);
+               if (error)
+                       goto out_drop_iolock;
+       }
+
+       tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+       error = xfs_trans_reserve(tp, &M_RES(mp)->tr_ichange, 0, 0);
+       if (error)
+               goto out_drop_iolock;
+
+       xfs_ilock(ip, XFS_ILOCK_EXCL);
+       xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+       xfs_setattr_time(ip, iattr);
+       if (update_isize) {
+               i_size_write(inode, iattr->ia_size);
+               ip->i_d.di_size = iattr->ia_size;
+       }
+
+       xfs_trans_set_sync(tp);
+       error = xfs_trans_commit(tp, 0);
+
+out_drop_iolock:
+       xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+       return error;
+}
diff --git a/fs/xfs/xfs_pnfs.h b/fs/xfs/xfs_pnfs.h
new file mode 100644 (file)
index 0000000..b7fbfce
--- /dev/null
@@ -0,0 +1,18 @@
+#ifndef _XFS_PNFS_H
+#define _XFS_PNFS_H 1
+
+#ifdef CONFIG_NFSD_PNFS
+int xfs_fs_get_uuid(struct super_block *sb, u8 *buf, u32 *len, u64 *offset);
+int xfs_fs_map_blocks(struct inode *inode, loff_t offset, u64 length,
+               struct iomap *iomap, bool write, u32 *device_generation);
+int xfs_fs_commit_blocks(struct inode *inode, struct iomap *maps, int nr_maps,
+               struct iattr *iattr);
+
+int xfs_break_layouts(struct inode *inode, uint *iolock);
+#else
+static inline int xfs_break_layouts(struct inode *inode, uint *iolock)
+{
+       return 0;
+}
+#endif /* CONFIG_NFSD_PNFS */
+#endif /* _XFS_PNFS_H */
diff --git a/include/acpi/acpi_lpat.h b/include/acpi/acpi_lpat.h
new file mode 100644 (file)
index 0000000..da37e12
--- /dev/null
@@ -0,0 +1,65 @@
+/*
+ * acpi_lpat.h - LPAT table processing functions
+ *
+ * Copyright (C) 2015 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef ACPI_LPAT_H
+#define ACPI_LPAT_H
+
+struct acpi_lpat {
+       int temp;
+       int raw;
+};
+
+struct acpi_lpat_conversion_table {
+       struct acpi_lpat *lpat;
+       int lpat_count;
+};
+
+#ifdef CONFIG_ACPI
+
+int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
+                         int raw);
+int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
+                         int temp);
+struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(acpi_handle
+                                                                 handle);
+void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
+                                    *lpat_table);
+
+#else
+static inline int acpi_lpat_raw_to_temp(struct acpi_lpat_conversion_table *lpat_table,
+                                int raw)
+{
+       return 0;
+}
+
+static inline int acpi_lpat_temp_to_raw(struct acpi_lpat_conversion_table *lpat_table,
+                                int temp)
+{
+       return 0;
+}
+
+static inline struct acpi_lpat_conversion_table *acpi_lpat_get_conversion_table(
+                                                       acpi_handle handle)
+{
+       return NULL;
+}
+
+static inline void acpi_lpat_free_conversion_table(struct acpi_lpat_conversion_table
+                                           *lpat_table)
+{
+}
+
+#endif
+#endif
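
The LPAT table is a list of (temperature, raw) calibration points; converting in either direction amounts to piecewise-linear interpolation between the two bracketing entries. A sketch of that idea using the structures declared above (illustrative only, not the actual drivers/acpi/acpi_lpat.c implementation, which may differ in range and error handling):

/* Sketch: linear interpolation over an LPAT table, raw value -> temperature. */
static int example_lpat_raw_to_temp(const struct acpi_lpat_conversion_table *t,
				    int raw)
{
	int i;

	for (i = 0; i < t->lpat_count - 1; i++) {
		const struct acpi_lpat *lo = &t->lpat[i];
		const struct acpi_lpat *hi = &t->lpat[i + 1];

		if (hi->raw == lo->raw)
			continue;	/* degenerate segment */

		/* Accept either ascending or descending raw values. */
		if ((raw >= lo->raw && raw <= hi->raw) ||
		    (raw <= lo->raw && raw >= hi->raw))
			return lo->temp + (raw - lo->raw) *
			       (hi->temp - lo->temp) / (hi->raw - lo->raw);
	}
	return 0;	/* out of table range; real code would signal an error */
}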
diff --git a/include/dt-bindings/clock/alphascale,asm9260.h b/include/dt-bindings/clock/alphascale,asm9260.h
new file mode 100644 (file)
index 0000000..04e8db2
--- /dev/null
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2014 Oleksij Rempel <linux@rempel-privat.de>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_ASM9260_H
+#define _DT_BINDINGS_CLK_ASM9260_H
+
+/* ahb gate */
+#define CLKID_AHB_ROM          0
+#define CLKID_AHB_RAM          1
+#define CLKID_AHB_GPIO         2
+#define CLKID_AHB_MAC          3
+#define CLKID_AHB_EMI          4
+#define CLKID_AHB_USB0         5
+#define CLKID_AHB_USB1         6
+#define CLKID_AHB_DMA0         7
+#define CLKID_AHB_DMA1         8
+#define CLKID_AHB_UART0                9
+#define CLKID_AHB_UART1                10
+#define CLKID_AHB_UART2                11
+#define CLKID_AHB_UART3                12
+#define CLKID_AHB_UART4                13
+#define CLKID_AHB_UART5                14
+#define CLKID_AHB_UART6                15
+#define CLKID_AHB_UART7                16
+#define CLKID_AHB_UART8                17
+#define CLKID_AHB_UART9                18
+#define CLKID_AHB_I2S0         19
+#define CLKID_AHB_I2C0         20
+#define CLKID_AHB_I2C1         21
+#define CLKID_AHB_SSP0         22
+#define CLKID_AHB_IOCONFIG     23
+#define CLKID_AHB_WDT          24
+#define CLKID_AHB_CAN0         25
+#define CLKID_AHB_CAN1         26
+#define CLKID_AHB_MPWM         27
+#define CLKID_AHB_SPI0         28
+#define CLKID_AHB_SPI1         29
+#define CLKID_AHB_QEI          30
+#define CLKID_AHB_QUADSPI0     31
+#define CLKID_AHB_CAMIF                32
+#define CLKID_AHB_LCDIF                33
+#define CLKID_AHB_TIMER0       34
+#define CLKID_AHB_TIMER1       35
+#define CLKID_AHB_TIMER2       36
+#define CLKID_AHB_TIMER3       37
+#define CLKID_AHB_IRQ          38
+#define CLKID_AHB_RTC          39
+#define CLKID_AHB_NAND         40
+#define CLKID_AHB_ADC0         41
+#define CLKID_AHB_LED          42
+#define CLKID_AHB_DAC0         43
+#define CLKID_AHB_LCD          44
+#define CLKID_AHB_I2S1         45
+#define CLKID_AHB_MAC1         46
+
+/* divider */
+#define CLKID_SYS_CPU          47
+#define CLKID_SYS_AHB          48
+#define CLKID_SYS_I2S0M                49
+#define CLKID_SYS_I2S0S                50
+#define CLKID_SYS_I2S1M                51
+#define CLKID_SYS_I2S1S                52
+#define CLKID_SYS_UART0                53
+#define CLKID_SYS_UART1                54
+#define CLKID_SYS_UART2                55
+#define CLKID_SYS_UART3                56
+#define CLKID_SYS_UART4                56
+#define CLKID_SYS_UART5                57
+#define CLKID_SYS_UART6                58
+#define CLKID_SYS_UART7                59
+#define CLKID_SYS_UART8                60
+#define CLKID_SYS_UART9                61
+#define CLKID_SYS_SPI0         62
+#define CLKID_SYS_SPI1         63
+#define CLKID_SYS_QUADSPI      64
+#define CLKID_SYS_SSP0         65
+#define CLKID_SYS_NAND         66
+#define CLKID_SYS_TRACE                67
+#define CLKID_SYS_CAMM         68
+#define CLKID_SYS_WDT          69
+#define CLKID_SYS_CLKOUT       70
+#define CLKID_SYS_MAC          71
+#define CLKID_SYS_LCD          72
+#define CLKID_SYS_ADCANA       73
+
+#define MAX_CLKS               74
+#endif
index 34fe28c622d0a6c2cbfbef348be34dfefa2b59f9..c4b1676ea674abb5c6f598525b3f0ca09e39e1e7 100644 (file)
 #define CLK_DIV_MCUISP1                453 /* Exynos4x12 only */
 #define CLK_DIV_ACLK200                454 /* Exynos4x12 only */
 #define CLK_DIV_ACLK400_MCUISP 455 /* Exynos4x12 only */
+#define CLK_DIV_ACP            456
+#define CLK_DIV_DMC            457
+#define CLK_DIV_C2C            458 /* Exynos4x12 only */
+#define CLK_DIV_GDL            459
+#define CLK_DIV_GDR            460
 
 /* must be greater than maximal clock id */
-#define CLK_NR_CLKS            456
+#define CLK_NR_CLKS            461
 
 #endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */
index 8e4681b07ae79fcaaf6b9e563ac360da6ac198d0..e33c75a3c09dc7caf26575a7e3469788aa6cc234 100644 (file)
 #define DOUT_SCLK_CC_PLL               4
 #define DOUT_SCLK_MFC_PLL              5
 #define DOUT_ACLK_CCORE_133            6
-#define TOPC_NR_CLK                    7
+#define DOUT_ACLK_MSCL_532             7
+#define ACLK_MSCL_532                  8
+#define DOUT_SCLK_AUD_PLL              9
+#define FOUT_AUD_PLL                   10
+#define TOPC_NR_CLK                    11
 
 /* TOP0 */
 #define DOUT_ACLK_PERIC1               1
 #define CLK_SCLK_UART1                 4
 #define CLK_SCLK_UART2                 5
 #define CLK_SCLK_UART3                 6
-#define TOP0_NR_CLK                    7
+#define CLK_SCLK_SPI0                  7
+#define CLK_SCLK_SPI1                  8
+#define CLK_SCLK_SPI2                  9
+#define CLK_SCLK_SPI3                  10
+#define CLK_SCLK_SPI4                  11
+#define CLK_SCLK_SPDIF                 12
+#define CLK_SCLK_PCM1                  13
+#define CLK_SCLK_I2S1                  14
+#define TOP0_NR_CLK                    15
 
 /* TOP1 */
 #define DOUT_ACLK_FSYS1_200            1
 #define PCLK_HSI2C6                    9
 #define PCLK_HSI2C7                    10
 #define PCLK_HSI2C8                    11
-#define PERIC1_NR_CLK                  12
+#define PCLK_SPI0                      12
+#define PCLK_SPI1                      13
+#define PCLK_SPI2                      14
+#define PCLK_SPI3                      15
+#define PCLK_SPI4                      16
+#define SCLK_SPI0                      17
+#define SCLK_SPI1                      18
+#define SCLK_SPI2                      19
+#define SCLK_SPI3                      20
+#define SCLK_SPI4                      21
+#define PCLK_I2S1                      22
+#define PCLK_PCM1                      23
+#define PCLK_SPDIF                     24
+#define SCLK_I2S1                      25
+#define SCLK_PCM1                      26
+#define SCLK_SPDIF                     27
+#define PERIC1_NR_CLK                  28
 
 /* PERIS */
 #define PCLK_CHIPID                    1
 
 /* FSYS0 */
 #define ACLK_MMC2                      1
-#define FSYS0_NR_CLK                   2
+#define ACLK_AXIUS_USBDRD30X_FSYS0X    2
+#define ACLK_USBDRD300                 3
+#define SCLK_USBDRD300_SUSPENDCLK      4
+#define SCLK_USBDRD300_REFCLK          5
+#define PHYCLK_USBDRD300_UDRD30_PIPE_PCLK_USER         6
+#define PHYCLK_USBDRD300_UDRD30_PHYCLK_USER            7
+#define OSCCLK_PHY_CLKOUT_USB30_PHY            8
+#define ACLK_PDMA0                     9
+#define ACLK_PDMA1                     10
+#define FSYS0_NR_CLK                   11
 
 /* FSYS1 */
 #define ACLK_MMC1                      1
 #define ACLK_MMC0                      2
 #define FSYS1_NR_CLK                   3
 
+/* MSCL */
+#define USERMUX_ACLK_MSCL_532          1
+#define DOUT_PCLK_MSCL                 2
+#define ACLK_MSCL_0                    3
+#define ACLK_MSCL_1                    4
+#define ACLK_JPEG                      5
+#define ACLK_G2D                       6
+#define ACLK_LH_ASYNC_SI_MSCL_0                7
+#define ACLK_LH_ASYNC_SI_MSCL_1                8
+#define ACLK_AXI2ACEL_BRIDGE           9
+#define ACLK_XIU_MSCLX_0               10
+#define ACLK_XIU_MSCLX_1               11
+#define ACLK_QE_MSCL_0                 12
+#define ACLK_QE_MSCL_1                 13
+#define ACLK_QE_JPEG                   14
+#define ACLK_QE_G2D                    15
+#define ACLK_PPMU_MSCL_0               16
+#define ACLK_PPMU_MSCL_1               17
+#define ACLK_MSCLNP_133                        18
+#define ACLK_AHB2APB_MSCL0P            19
+#define ACLK_AHB2APB_MSCL1P            20
+
+#define PCLK_MSCL_0                    21
+#define PCLK_MSCL_1                    22
+#define PCLK_JPEG                      23
+#define PCLK_G2D                       24
+#define PCLK_QE_MSCL_0                 25
+#define PCLK_QE_MSCL_1                 26
+#define PCLK_QE_JPEG                   27
+#define PCLK_QE_G2D                    28
+#define PCLK_PPMU_MSCL_0               29
+#define PCLK_PPMU_MSCL_1               30
+#define PCLK_AXI2ACEL_BRIDGE           31
+#define PCLK_PMU_MSCL                  32
+#define MSCL_NR_CLK                    33
+
+/* AUD */
+#define SCLK_I2S                       1
+#define SCLK_PCM                       2
+#define PCLK_I2S                       3
+#define PCLK_PCM                       4
+#define ACLK_ADMA                      5
+#define AUD_NR_CLK                     6
 #endif /* _DT_BINDINGS_CLOCK_EXYNOS7_H */
index b857cadb0bd40ef8c2843693b6fa8c9f7bb11528..04fb29ae30e69801f37713f1284d165396f8525a 100644 (file)
 #define PLL0_VOTE                              221
 #define PLL3                                   222
 #define PLL3_VOTE                              223
-#define PLL4                                   224
 #define PLL4_VOTE                              225
 #define PLL8                                   226
 #define PLL8_VOTE                              227
diff --git a/include/dt-bindings/clock/qcom,lcc-ipq806x.h b/include/dt-bindings/clock/qcom,lcc-ipq806x.h
new file mode 100644 (file)
index 0000000..4e944b8
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_LCC_IPQ806X_H
+#define _DT_BINDINGS_CLK_LCC_IPQ806X_H
+
+#define PLL4                           0
+#define MI2S_OSR_SRC                   1
+#define MI2S_OSR_CLK                   2
+#define MI2S_DIV_CLK                   3
+#define MI2S_BIT_DIV_CLK               4
+#define MI2S_BIT_CLK                   5
+#define PCM_SRC                                6
+#define PCM_CLK_OUT                    7
+#define PCM_CLK                                8
+#define SPDIF_SRC                      9
+#define SPDIF_CLK                      10
+#define AHBIX_CLK                      11
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,lcc-msm8960.h b/include/dt-bindings/clock/qcom,lcc-msm8960.h
new file mode 100644 (file)
index 0000000..4fb2aa6
--- /dev/null
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_CLK_LCC_MSM8960_H
+#define _DT_BINDINGS_CLK_LCC_MSM8960_H
+
+#define PLL4                           0
+#define MI2S_OSR_SRC                   1
+#define MI2S_OSR_CLK                   2
+#define MI2S_DIV_CLK                   3
+#define MI2S_BIT_DIV_CLK               4
+#define MI2S_BIT_CLK                   5
+#define PCM_SRC                                6
+#define PCM_CLK_OUT                    7
+#define PCM_CLK                                8
+#define SLIMBUS_SRC                    9
+#define AUDIO_SLIMBUS_CLK              10
+#define SPS_SLIMBUS_CLK                        11
+#define CODEC_I2S_MIC_OSR_SRC          12
+#define CODEC_I2S_MIC_OSR_CLK          13
+#define CODEC_I2S_MIC_DIV_CLK          14
+#define CODEC_I2S_MIC_BIT_DIV_CLK      15
+#define CODEC_I2S_MIC_BIT_CLK          16
+#define SPARE_I2S_MIC_OSR_SRC          17
+#define SPARE_I2S_MIC_OSR_CLK          18
+#define SPARE_I2S_MIC_DIV_CLK          19
+#define SPARE_I2S_MIC_BIT_DIV_CLK      20
+#define SPARE_I2S_MIC_BIT_CLK          21
+#define CODEC_I2S_SPKR_OSR_SRC         22
+#define CODEC_I2S_SPKR_OSR_CLK         23
+#define CODEC_I2S_SPKR_DIV_CLK         24
+#define CODEC_I2S_SPKR_BIT_DIV_CLK     25
+#define CODEC_I2S_SPKR_BIT_CLK         26
+#define SPARE_I2S_SPKR_OSR_SRC         27
+#define SPARE_I2S_SPKR_OSR_CLK         28
+#define SPARE_I2S_SPKR_DIV_CLK         29
+#define SPARE_I2S_SPKR_BIT_DIV_CLK     30
+#define SPARE_I2S_SPKR_BIT_CLK         31
+
+#endif
diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
new file mode 100644 (file)
index 0000000..ae2eb17
--- /dev/null
@@ -0,0 +1,345 @@
+/*
+ * This header provides constants for binding nvidia,tegra124-car or
+ * nvidia,tegra132-car.
+ *
+ * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB
+ * registers. These IDs often match those in the CAR's RST_DEVICES registers,
+ * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In
+ * this case, those clocks are assigned IDs above 191 in order to highlight
+ * this issue. Implementations that interpret these clock IDs as bit values
+ * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to
+ * explicitly handle these special cases.
+ *
+ * The balance of the clocks controlled by the CAR are assigned IDs of 192 and
+ * above.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H
+#define _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H
+
+/* 0 */
+/* 1 */
+/* 2 */
+#define TEGRA124_CLK_ISPB 3
+#define TEGRA124_CLK_RTC 4
+#define TEGRA124_CLK_TIMER 5
+#define TEGRA124_CLK_UARTA 6
+/* 7 (register bit affects uartb and vfir) */
+/* 8 */
+#define TEGRA124_CLK_SDMMC2 9
+/* 10 (register bit affects spdif_in and spdif_out) */
+#define TEGRA124_CLK_I2S1 11
+#define TEGRA124_CLK_I2C1 12
+/* 13 */
+#define TEGRA124_CLK_SDMMC1 14
+#define TEGRA124_CLK_SDMMC4 15
+/* 16 */
+#define TEGRA124_CLK_PWM 17
+#define TEGRA124_CLK_I2S2 18
+/* 20 (register bit affects vi and vi_sensor) */
+/* 21 */
+#define TEGRA124_CLK_USBD 22
+#define TEGRA124_CLK_ISP 23
+/* 24 */
+/* 25 */
+#define TEGRA124_CLK_DISP2 26
+#define TEGRA124_CLK_DISP1 27
+#define TEGRA124_CLK_HOST1X 28
+#define TEGRA124_CLK_VCP 29
+#define TEGRA124_CLK_I2S0 30
+/* 31 */
+
+#define TEGRA124_CLK_MC 32
+/* 33 */
+#define TEGRA124_CLK_APBDMA 34
+/* 35 */
+#define TEGRA124_CLK_KBC 36
+/* 37 */
+/* 38 */
+/* 39 (register bit affects fuse and fuse_burn) */
+#define TEGRA124_CLK_KFUSE 40
+#define TEGRA124_CLK_SBC1 41
+#define TEGRA124_CLK_NOR 42
+/* 43 */
+#define TEGRA124_CLK_SBC2 44
+/* 45 */
+#define TEGRA124_CLK_SBC3 46
+#define TEGRA124_CLK_I2C5 47
+#define TEGRA124_CLK_DSIA 48
+/* 49 */
+#define TEGRA124_CLK_MIPI 50
+#define TEGRA124_CLK_HDMI 51
+#define TEGRA124_CLK_CSI 52
+/* 53 */
+#define TEGRA124_CLK_I2C2 54
+#define TEGRA124_CLK_UARTC 55
+#define TEGRA124_CLK_MIPI_CAL 56
+#define TEGRA124_CLK_EMC 57
+#define TEGRA124_CLK_USB2 58
+#define TEGRA124_CLK_USB3 59
+/* 60 */
+#define TEGRA124_CLK_VDE 61
+#define TEGRA124_CLK_BSEA 62
+#define TEGRA124_CLK_BSEV 63
+
+/* 64 */
+#define TEGRA124_CLK_UARTD 65
+/* 66 */
+#define TEGRA124_CLK_I2C3 67
+#define TEGRA124_CLK_SBC4 68
+#define TEGRA124_CLK_SDMMC3 69
+#define TEGRA124_CLK_PCIE 70
+#define TEGRA124_CLK_OWR 71
+#define TEGRA124_CLK_AFI 72
+#define TEGRA124_CLK_CSITE 73
+/* 74 */
+/* 75 */
+#define TEGRA124_CLK_LA 76
+#define TEGRA124_CLK_TRACE 77
+#define TEGRA124_CLK_SOC_THERM 78
+#define TEGRA124_CLK_DTV 79
+/* 80 */
+#define TEGRA124_CLK_I2CSLOW 81
+#define TEGRA124_CLK_DSIB 82
+#define TEGRA124_CLK_TSEC 83
+/* 84 */
+/* 85 */
+/* 86 */
+/* 87 */
+/* 88 */
+#define TEGRA124_CLK_XUSB_HOST 89
+/* 90 */
+#define TEGRA124_CLK_MSENC 91
+#define TEGRA124_CLK_CSUS 92
+/* 93 */
+/* 94 */
+/* 95 (bit affects xusb_dev and xusb_dev_src) */
+
+/* 96 */
+/* 97 */
+/* 98 */
+#define TEGRA124_CLK_MSELECT 99
+#define TEGRA124_CLK_TSENSOR 100
+#define TEGRA124_CLK_I2S3 101
+#define TEGRA124_CLK_I2S4 102
+#define TEGRA124_CLK_I2C4 103
+#define TEGRA124_CLK_SBC5 104
+#define TEGRA124_CLK_SBC6 105
+#define TEGRA124_CLK_D_AUDIO 106
+#define TEGRA124_CLK_APBIF 107
+#define TEGRA124_CLK_DAM0 108
+#define TEGRA124_CLK_DAM1 109
+#define TEGRA124_CLK_DAM2 110
+#define TEGRA124_CLK_HDA2CODEC_2X 111
+/* 112 */
+#define TEGRA124_CLK_AUDIO0_2X 113
+#define TEGRA124_CLK_AUDIO1_2X 114
+#define TEGRA124_CLK_AUDIO2_2X 115
+#define TEGRA124_CLK_AUDIO3_2X 116
+#define TEGRA124_CLK_AUDIO4_2X 117
+#define TEGRA124_CLK_SPDIF_2X 118
+#define TEGRA124_CLK_ACTMON 119
+#define TEGRA124_CLK_EXTERN1 120
+#define TEGRA124_CLK_EXTERN2 121
+#define TEGRA124_CLK_EXTERN3 122
+#define TEGRA124_CLK_SATA_OOB 123
+#define TEGRA124_CLK_SATA 124
+#define TEGRA124_CLK_HDA 125
+/* 126 */
+#define TEGRA124_CLK_SE 127
+
+#define TEGRA124_CLK_HDA2HDMI 128
+#define TEGRA124_CLK_SATA_COLD 129
+/* 130 */
+/* 131 */
+/* 132 */
+/* 133 */
+/* 134 */
+/* 135 */
+/* 136 */
+/* 137 */
+/* 138 */
+/* 139 */
+/* 140 */
+/* 141 */
+/* 142 */
+/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */
+/*      xusb_host_src and xusb_ss_src) */
+#define TEGRA124_CLK_CILAB 144
+#define TEGRA124_CLK_CILCD 145
+#define TEGRA124_CLK_CILE 146
+#define TEGRA124_CLK_DSIALP 147
+#define TEGRA124_CLK_DSIBLP 148
+#define TEGRA124_CLK_ENTROPY 149
+#define TEGRA124_CLK_DDS 150
+/* 151 */
+#define TEGRA124_CLK_DP2 152
+#define TEGRA124_CLK_AMX 153
+#define TEGRA124_CLK_ADX 154
+/* 155 (bit affects dfll_ref and dfll_soc) */
+#define TEGRA124_CLK_XUSB_SS 156
+/* 157 */
+/* 158 */
+/* 159 */
+
+/* 160 */
+/* 161 */
+/* 162 */
+/* 163 */
+/* 164 */
+/* 165 */
+#define TEGRA124_CLK_I2C6 166
+/* 167 */
+/* 168 */
+/* 169 */
+/* 170 */
+#define TEGRA124_CLK_VIM2_CLK 171
+/* 172 */
+/* 173 */
+/* 174 */
+/* 175 */
+#define TEGRA124_CLK_HDMI_AUDIO 176
+#define TEGRA124_CLK_CLK72MHZ 177
+#define TEGRA124_CLK_VIC03 178
+/* 179 */
+#define TEGRA124_CLK_ADX1 180
+#define TEGRA124_CLK_DPAUX 181
+#define TEGRA124_CLK_SOR0 182
+/* 183 */
+#define TEGRA124_CLK_GPU 184
+#define TEGRA124_CLK_AMX1 185
+/* 186 */
+/* 187 */
+/* 188 */
+/* 189 */
+/* 190 */
+/* 191 */
+#define TEGRA124_CLK_UARTB 192
+#define TEGRA124_CLK_VFIR 193
+#define TEGRA124_CLK_SPDIF_IN 194
+#define TEGRA124_CLK_SPDIF_OUT 195
+#define TEGRA124_CLK_VI 196
+#define TEGRA124_CLK_VI_SENSOR 197
+#define TEGRA124_CLK_FUSE 198
+#define TEGRA124_CLK_FUSE_BURN 199
+#define TEGRA124_CLK_CLK_32K 200
+#define TEGRA124_CLK_CLK_M 201
+#define TEGRA124_CLK_CLK_M_DIV2 202
+#define TEGRA124_CLK_CLK_M_DIV4 203
+#define TEGRA124_CLK_PLL_REF 204
+#define TEGRA124_CLK_PLL_C 205
+#define TEGRA124_CLK_PLL_C_OUT1 206
+#define TEGRA124_CLK_PLL_C2 207
+#define TEGRA124_CLK_PLL_C3 208
+#define TEGRA124_CLK_PLL_M 209
+#define TEGRA124_CLK_PLL_M_OUT1 210
+#define TEGRA124_CLK_PLL_P 211
+#define TEGRA124_CLK_PLL_P_OUT1 212
+#define TEGRA124_CLK_PLL_P_OUT2 213
+#define TEGRA124_CLK_PLL_P_OUT3 214
+#define TEGRA124_CLK_PLL_P_OUT4 215
+#define TEGRA124_CLK_PLL_A 216
+#define TEGRA124_CLK_PLL_A_OUT0 217
+#define TEGRA124_CLK_PLL_D 218
+#define TEGRA124_CLK_PLL_D_OUT0 219
+#define TEGRA124_CLK_PLL_D2 220
+#define TEGRA124_CLK_PLL_D2_OUT0 221
+#define TEGRA124_CLK_PLL_U 222
+#define TEGRA124_CLK_PLL_U_480M 223
+
+#define TEGRA124_CLK_PLL_U_60M 224
+#define TEGRA124_CLK_PLL_U_48M 225
+#define TEGRA124_CLK_PLL_U_12M 226
+/* 227 */
+/* 228 */
+#define TEGRA124_CLK_PLL_RE_VCO 229
+#define TEGRA124_CLK_PLL_RE_OUT 230
+#define TEGRA124_CLK_PLL_E 231
+#define TEGRA124_CLK_SPDIF_IN_SYNC 232
+#define TEGRA124_CLK_I2S0_SYNC 233
+#define TEGRA124_CLK_I2S1_SYNC 234
+#define TEGRA124_CLK_I2S2_SYNC 235
+#define TEGRA124_CLK_I2S3_SYNC 236
+#define TEGRA124_CLK_I2S4_SYNC 237
+#define TEGRA124_CLK_VIMCLK_SYNC 238
+#define TEGRA124_CLK_AUDIO0 239
+#define TEGRA124_CLK_AUDIO1 240
+#define TEGRA124_CLK_AUDIO2 241
+#define TEGRA124_CLK_AUDIO3 242
+#define TEGRA124_CLK_AUDIO4 243
+#define TEGRA124_CLK_SPDIF 244
+#define TEGRA124_CLK_CLK_OUT_1 245
+#define TEGRA124_CLK_CLK_OUT_2 246
+#define TEGRA124_CLK_CLK_OUT_3 247
+#define TEGRA124_CLK_BLINK 248
+/* 249 */
+/* 250 */
+/* 251 */
+#define TEGRA124_CLK_XUSB_HOST_SRC 252
+#define TEGRA124_CLK_XUSB_FALCON_SRC 253
+#define TEGRA124_CLK_XUSB_FS_SRC 254
+#define TEGRA124_CLK_XUSB_SS_SRC 255
+
+#define TEGRA124_CLK_XUSB_DEV_SRC 256
+#define TEGRA124_CLK_XUSB_DEV 257
+#define TEGRA124_CLK_XUSB_HS_SRC 258
+#define TEGRA124_CLK_SCLK 259
+#define TEGRA124_CLK_HCLK 260
+#define TEGRA124_CLK_PCLK 261
+/* 262 */
+/* 263 */
+#define TEGRA124_CLK_DFLL_REF 264
+#define TEGRA124_CLK_DFLL_SOC 265
+#define TEGRA124_CLK_VI_SENSOR2 266
+#define TEGRA124_CLK_PLL_P_OUT5 267
+#define TEGRA124_CLK_CML0 268
+#define TEGRA124_CLK_CML1 269
+#define TEGRA124_CLK_PLL_C4 270
+#define TEGRA124_CLK_PLL_DP 271
+#define TEGRA124_CLK_PLL_E_MUX 272
+#define TEGRA124_CLK_PLLD_DSI 273
+/* 274 */
+/* 275 */
+/* 276 */
+/* 277 */
+/* 278 */
+/* 279 */
+/* 280 */
+/* 281 */
+/* 282 */
+/* 283 */
+/* 284 */
+/* 285 */
+/* 286 */
+/* 287 */
+
+/* 288 */
+/* 289 */
+/* 290 */
+/* 291 */
+/* 292 */
+/* 293 */
+/* 294 */
+/* 295 */
+/* 296 */
+/* 297 */
+/* 298 */
+/* 299 */
+#define TEGRA124_CLK_AUDIO0_MUX 300
+#define TEGRA124_CLK_AUDIO1_MUX 301
+#define TEGRA124_CLK_AUDIO2_MUX 302
+#define TEGRA124_CLK_AUDIO3_MUX 303
+#define TEGRA124_CLK_AUDIO4_MUX 304
+#define TEGRA124_CLK_SPDIF_MUX 305
+#define TEGRA124_CLK_CLK_OUT_1_MUX 306
+#define TEGRA124_CLK_CLK_OUT_2_MUX 307
+#define TEGRA124_CLK_CLK_OUT_3_MUX 308
+/* 309 */
+/* 310 */
+#define TEGRA124_CLK_SOR0_LVDS 311
+#define TEGRA124_CLK_XUSB_SS_DIV2 312
+
+#define TEGRA124_CLK_PLL_M_UD 313
+#define TEGRA124_CLK_PLL_C_UD 314
+
+#endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_COMMON_H */
index af9bc9a3ddbc561840b60f6f5416f19f846e8567..2860737f04436ba0515895b7b04b85e4bfe7885c 100644 (file)
 /*
- * This header provides constants for binding nvidia,tegra124-car.
- *
- * The first 192 clocks are numbered to match the bits in the CAR's CLK_OUT_ENB
- * registers. These IDs often match those in the CAR's RST_DEVICES registers,
- * but not in all cases. Some bits in CLK_OUT_ENB affect multiple clocks. In
- * this case, those clocks are assigned IDs above 185 in order to highlight
- * this issue. Implementations that interpret these clock IDs as bit values
- * within the CLK_OUT_ENB or RST_DEVICES registers should be careful to
- * explicitly handle these special cases.
- *
- * The balance of the clocks controlled by the CAR are assigned IDs of 185 and
- * above.
+ * This header provides Tegra124-specific constants for binding
+ * nvidia,tegra124-car.
  */
 
+#include <dt-bindings/clock/tegra124-car-common.h>
+
 #ifndef _DT_BINDINGS_CLOCK_TEGRA124_CAR_H
 #define _DT_BINDINGS_CLOCK_TEGRA124_CAR_H
 
-/* 0 */
-/* 1 */
-/* 2 */
-#define TEGRA124_CLK_ISPB 3
-#define TEGRA124_CLK_RTC 4
-#define TEGRA124_CLK_TIMER 5
-#define TEGRA124_CLK_UARTA 6
-/* 7 (register bit affects uartb and vfir) */
-/* 8 */
-#define TEGRA124_CLK_SDMMC2 9
-/* 10 (register bit affects spdif_in and spdif_out) */
-#define TEGRA124_CLK_I2S1 11
-#define TEGRA124_CLK_I2C1 12
-/* 13 */
-#define TEGRA124_CLK_SDMMC1 14
-#define TEGRA124_CLK_SDMMC4 15
-/* 16 */
-#define TEGRA124_CLK_PWM 17
-#define TEGRA124_CLK_I2S2 18
-/* 20 (register bit affects vi and vi_sensor) */
-/* 21 */
-#define TEGRA124_CLK_USBD 22
-#define TEGRA124_CLK_ISP 23
-/* 26 */
-/* 25 */
-#define TEGRA124_CLK_DISP2 26
-#define TEGRA124_CLK_DISP1 27
-#define TEGRA124_CLK_HOST1X 28
-#define TEGRA124_CLK_VCP 29
-#define TEGRA124_CLK_I2S0 30
-/* 31 */
-
-#define TEGRA124_CLK_MC 32
-/* 33 */
-#define TEGRA124_CLK_APBDMA 34
-/* 35 */
-#define TEGRA124_CLK_KBC 36
-/* 37 */
-/* 38 */
-/* 39 (register bit affects fuse and fuse_burn) */
-#define TEGRA124_CLK_KFUSE 40
-#define TEGRA124_CLK_SBC1 41
-#define TEGRA124_CLK_NOR 42
-/* 43 */
-#define TEGRA124_CLK_SBC2 44
-/* 45 */
-#define TEGRA124_CLK_SBC3 46
-#define TEGRA124_CLK_I2C5 47
-#define TEGRA124_CLK_DSIA 48
-/* 49 */
-#define TEGRA124_CLK_MIPI 50
-#define TEGRA124_CLK_HDMI 51
-#define TEGRA124_CLK_CSI 52
-/* 53 */
-#define TEGRA124_CLK_I2C2 54
-#define TEGRA124_CLK_UARTC 55
-#define TEGRA124_CLK_MIPI_CAL 56
-#define TEGRA124_CLK_EMC 57
-#define TEGRA124_CLK_USB2 58
-#define TEGRA124_CLK_USB3 59
-/* 60 */
-#define TEGRA124_CLK_VDE 61
-#define TEGRA124_CLK_BSEA 62
-#define TEGRA124_CLK_BSEV 63
-
-/* 64 */
-#define TEGRA124_CLK_UARTD 65
-/* 66 */
-#define TEGRA124_CLK_I2C3 67
-#define TEGRA124_CLK_SBC4 68
-#define TEGRA124_CLK_SDMMC3 69
-#define TEGRA124_CLK_PCIE 70
-#define TEGRA124_CLK_OWR 71
-#define TEGRA124_CLK_AFI 72
-#define TEGRA124_CLK_CSITE 73
-/* 74 */
-/* 75 */
-#define TEGRA124_CLK_LA 76
-#define TEGRA124_CLK_TRACE 77
-#define TEGRA124_CLK_SOC_THERM 78
-#define TEGRA124_CLK_DTV 79
-/* 80 */
-#define TEGRA124_CLK_I2CSLOW 81
-#define TEGRA124_CLK_DSIB 82
-#define TEGRA124_CLK_TSEC 83
-/* 84 */
-/* 85 */
-/* 86 */
-/* 87 */
-/* 88 */
-#define TEGRA124_CLK_XUSB_HOST 89
-/* 90 */
-#define TEGRA124_CLK_MSENC 91
-#define TEGRA124_CLK_CSUS 92
-/* 93 */
-/* 94 */
-/* 95 (bit affects xusb_dev and xusb_dev_src) */
-
-/* 96 */
-/* 97 */
-/* 98 */
-#define TEGRA124_CLK_MSELECT 99
-#define TEGRA124_CLK_TSENSOR 100
-#define TEGRA124_CLK_I2S3 101
-#define TEGRA124_CLK_I2S4 102
-#define TEGRA124_CLK_I2C4 103
-#define TEGRA124_CLK_SBC5 104
-#define TEGRA124_CLK_SBC6 105
-#define TEGRA124_CLK_D_AUDIO 106
-#define TEGRA124_CLK_APBIF 107
-#define TEGRA124_CLK_DAM0 108
-#define TEGRA124_CLK_DAM1 109
-#define TEGRA124_CLK_DAM2 110
-#define TEGRA124_CLK_HDA2CODEC_2X 111
-/* 112 */
-#define TEGRA124_CLK_AUDIO0_2X 113
-#define TEGRA124_CLK_AUDIO1_2X 114
-#define TEGRA124_CLK_AUDIO2_2X 115
-#define TEGRA124_CLK_AUDIO3_2X 116
-#define TEGRA124_CLK_AUDIO4_2X 117
-#define TEGRA124_CLK_SPDIF_2X 118
-#define TEGRA124_CLK_ACTMON 119
-#define TEGRA124_CLK_EXTERN1 120
-#define TEGRA124_CLK_EXTERN2 121
-#define TEGRA124_CLK_EXTERN3 122
-#define TEGRA124_CLK_SATA_OOB 123
-#define TEGRA124_CLK_SATA 124
-#define TEGRA124_CLK_HDA 125
-/* 126 */
-#define TEGRA124_CLK_SE 127
-
-#define TEGRA124_CLK_HDA2HDMI 128
-#define TEGRA124_CLK_SATA_COLD 129
-/* 130 */
-/* 131 */
-/* 132 */
-/* 133 */
-/* 134 */
-/* 135 */
-/* 136 */
-/* 137 */
-/* 138 */
-/* 139 */
-/* 140 */
-/* 141 */
-/* 142 */
-/* 143 (bit affects xusb_falcon_src, xusb_fs_src, */
-/*      xusb_host_src and xusb_ss_src) */
-#define TEGRA124_CLK_CILAB 144
-#define TEGRA124_CLK_CILCD 145
-#define TEGRA124_CLK_CILE 146
-#define TEGRA124_CLK_DSIALP 147
-#define TEGRA124_CLK_DSIBLP 148
-#define TEGRA124_CLK_ENTROPY 149
-#define TEGRA124_CLK_DDS 150
-/* 151 */
-#define TEGRA124_CLK_DP2 152
-#define TEGRA124_CLK_AMX 153
-#define TEGRA124_CLK_ADX 154
-/* 155 (bit affects dfll_ref and dfll_soc) */
-#define TEGRA124_CLK_XUSB_SS 156
-/* 157 */
-/* 158 */
-/* 159 */
-
-/* 160 */
-/* 161 */
-/* 162 */
-/* 163 */
-/* 164 */
-/* 165 */
-#define TEGRA124_CLK_I2C6 166
-/* 167 */
-/* 168 */
-/* 169 */
-/* 170 */
-#define TEGRA124_CLK_VIM2_CLK 171
-/* 172 */
-/* 173 */
-/* 174 */
-/* 175 */
-#define TEGRA124_CLK_HDMI_AUDIO 176
-#define TEGRA124_CLK_CLK72MHZ 177
-#define TEGRA124_CLK_VIC03 178
-/* 179 */
-#define TEGRA124_CLK_ADX1 180
-#define TEGRA124_CLK_DPAUX 181
-#define TEGRA124_CLK_SOR0 182
-/* 183 */
-#define TEGRA124_CLK_GPU 184
-#define TEGRA124_CLK_AMX1 185
-/* 186 */
-/* 187 */
-/* 188 */
-/* 189 */
-/* 190 */
-/* 191 */
-#define TEGRA124_CLK_UARTB 192
-#define TEGRA124_CLK_VFIR 193
-#define TEGRA124_CLK_SPDIF_IN 194
-#define TEGRA124_CLK_SPDIF_OUT 195
-#define TEGRA124_CLK_VI 196
-#define TEGRA124_CLK_VI_SENSOR 197
-#define TEGRA124_CLK_FUSE 198
-#define TEGRA124_CLK_FUSE_BURN 199
-#define TEGRA124_CLK_CLK_32K 200
-#define TEGRA124_CLK_CLK_M 201
-#define TEGRA124_CLK_CLK_M_DIV2 202
-#define TEGRA124_CLK_CLK_M_DIV4 203
-#define TEGRA124_CLK_PLL_REF 204
-#define TEGRA124_CLK_PLL_C 205
-#define TEGRA124_CLK_PLL_C_OUT1 206
-#define TEGRA124_CLK_PLL_C2 207
-#define TEGRA124_CLK_PLL_C3 208
-#define TEGRA124_CLK_PLL_M 209
-#define TEGRA124_CLK_PLL_M_OUT1 210
-#define TEGRA124_CLK_PLL_P 211
-#define TEGRA124_CLK_PLL_P_OUT1 212
-#define TEGRA124_CLK_PLL_P_OUT2 213
-#define TEGRA124_CLK_PLL_P_OUT3 214
-#define TEGRA124_CLK_PLL_P_OUT4 215
-#define TEGRA124_CLK_PLL_A 216
-#define TEGRA124_CLK_PLL_A_OUT0 217
-#define TEGRA124_CLK_PLL_D 218
-#define TEGRA124_CLK_PLL_D_OUT0 219
-#define TEGRA124_CLK_PLL_D2 220
-#define TEGRA124_CLK_PLL_D2_OUT0 221
-#define TEGRA124_CLK_PLL_U 222
-#define TEGRA124_CLK_PLL_U_480M 223
-
-#define TEGRA124_CLK_PLL_U_60M 224
-#define TEGRA124_CLK_PLL_U_48M 225
-#define TEGRA124_CLK_PLL_U_12M 226
-#define TEGRA124_CLK_PLL_X 227
-#define TEGRA124_CLK_PLL_X_OUT0 228
-#define TEGRA124_CLK_PLL_RE_VCO 229
-#define TEGRA124_CLK_PLL_RE_OUT 230
-#define TEGRA124_CLK_PLL_E 231
-#define TEGRA124_CLK_SPDIF_IN_SYNC 232
-#define TEGRA124_CLK_I2S0_SYNC 233
-#define TEGRA124_CLK_I2S1_SYNC 234
-#define TEGRA124_CLK_I2S2_SYNC 235
-#define TEGRA124_CLK_I2S3_SYNC 236
-#define TEGRA124_CLK_I2S4_SYNC 237
-#define TEGRA124_CLK_VIMCLK_SYNC 238
-#define TEGRA124_CLK_AUDIO0 239
-#define TEGRA124_CLK_AUDIO1 240
-#define TEGRA124_CLK_AUDIO2 241
-#define TEGRA124_CLK_AUDIO3 242
-#define TEGRA124_CLK_AUDIO4 243
-#define TEGRA124_CLK_SPDIF 244
-#define TEGRA124_CLK_CLK_OUT_1 245
-#define TEGRA124_CLK_CLK_OUT_2 246
-#define TEGRA124_CLK_CLK_OUT_3 247
-#define TEGRA124_CLK_BLINK 248
-/* 249 */
-/* 250 */
-/* 251 */
-#define TEGRA124_CLK_XUSB_HOST_SRC 252
-#define TEGRA124_CLK_XUSB_FALCON_SRC 253
-#define TEGRA124_CLK_XUSB_FS_SRC 254
-#define TEGRA124_CLK_XUSB_SS_SRC 255
-
-#define TEGRA124_CLK_XUSB_DEV_SRC 256
-#define TEGRA124_CLK_XUSB_DEV 257
-#define TEGRA124_CLK_XUSB_HS_SRC 258
-#define TEGRA124_CLK_SCLK 259
-#define TEGRA124_CLK_HCLK 260
-#define TEGRA124_CLK_PCLK 261
-#define TEGRA124_CLK_CCLK_G 262
-#define TEGRA124_CLK_CCLK_LP 263
-#define TEGRA124_CLK_DFLL_REF 264
-#define TEGRA124_CLK_DFLL_SOC 265
-#define TEGRA124_CLK_VI_SENSOR2 266
-#define TEGRA124_CLK_PLL_P_OUT5 267
-#define TEGRA124_CLK_CML0 268
-#define TEGRA124_CLK_CML1 269
-#define TEGRA124_CLK_PLL_C4 270
-#define TEGRA124_CLK_PLL_DP 271
-#define TEGRA124_CLK_PLL_E_MUX 272
-/* 273 */
-/* 274 */
-/* 275 */
-/* 276 */
-/* 277 */
-/* 278 */
-/* 279 */
-/* 280 */
-/* 281 */
-/* 282 */
-/* 283 */
-/* 284 */
-/* 285 */
-/* 286 */
-/* 287 */
-
-/* 288 */
-/* 289 */
-/* 290 */
-/* 291 */
-/* 292 */
-/* 293 */
-/* 294 */
-/* 295 */
-/* 296 */
-/* 297 */
-/* 298 */
-/* 299 */
-#define TEGRA124_CLK_AUDIO0_MUX 300
-#define TEGRA124_CLK_AUDIO1_MUX 301
-#define TEGRA124_CLK_AUDIO2_MUX 302
-#define TEGRA124_CLK_AUDIO3_MUX 303
-#define TEGRA124_CLK_AUDIO4_MUX 304
-#define TEGRA124_CLK_SPDIF_MUX 305
-#define TEGRA124_CLK_CLK_OUT_1_MUX 306
-#define TEGRA124_CLK_CLK_OUT_2_MUX 307
-#define TEGRA124_CLK_CLK_OUT_3_MUX 308
-#define TEGRA124_CLK_DSIA_MUX 309
-#define TEGRA124_CLK_DSIB_MUX 310
-#define TEGRA124_CLK_SOR0_LVDS 311
-#define TEGRA124_CLK_XUSB_SS_DIV2 312
+#define TEGRA124_CLK_PLL_X             227
+#define TEGRA124_CLK_PLL_X_OUT0                228
 
-#define TEGRA124_CLK_PLL_M_UD 313
-#define TEGRA124_CLK_PLL_C_UD 314
+#define TEGRA124_CLK_CCLK_G            262
+#define TEGRA124_CLK_CCLK_LP           263
 
-#define TEGRA124_CLK_CLK_MAX 315
+#define TEGRA124_CLK_CLK_MAX           315
 
 #endif /* _DT_BINDINGS_CLOCK_TEGRA124_CAR_H */
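Illustrative aside, not part of the patch: after this split, existing C users keep working by including the SoC header, which now pulls the shared IDs in from tegra124-car-common.h; the variable names below are made up.

#include <dt-bindings/clock/tegra124-car.h>

static const unsigned int foo_uarta_id  = TEGRA124_CLK_UARTA;	/* shared ID from tegra124-car-common.h */
static const unsigned int foo_cclk_g_id = TEGRA124_CLK_CCLK_G;	/* Tegra124-only ID kept in tegra124-car.h */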
diff --git a/include/dt-bindings/thermal/thermal_exynos.h b/include/dt-bindings/thermal/thermal_exynos.h
new file mode 100644 (file)
index 0000000..0646500
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * thermal_exynos.h - Samsung EXYNOS TMU device tree definitions
+ *
+ *  Copyright (C) 2014 Samsung Electronics
+ *  Lukasz Majewski <l.majewski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _EXYNOS_THERMAL_TMU_DT_H
+#define _EXYNOS_THERMAL_TMU_DT_H
+
+#define TYPE_ONE_POINT_TRIMMING 0
+#define TYPE_ONE_POINT_TRIMMING_25 1
+#define TYPE_ONE_POINT_TRIMMING_85 2
+#define TYPE_TWO_POINT_TRIMMING 3
+#define TYPE_NONE 4
+
+#endif /* _EXYNOS_THERMAL_TMU_DT_H */
index c0dadaac26e3725b0fcee4573f817c326a0dbd7f..31eb03d0c7662dddd02fb126a24bc4198ffc65d8 100644 (file)
@@ -158,17 +158,6 @@ enum {
 };
 
 
-/* pool operations */
-enum {
-  POOL_OP_CREATE                       = 0x01,
-  POOL_OP_DELETE                       = 0x02,
-  POOL_OP_AUID_CHANGE                  = 0x03,
-  POOL_OP_CREATE_SNAP                  = 0x11,
-  POOL_OP_DELETE_SNAP                  = 0x12,
-  POOL_OP_CREATE_UNMANAGED_SNAP                = 0x21,
-  POOL_OP_DELETE_UNMANAGED_SNAP                = 0x22,
-};
-
 struct ceph_mon_request_header {
        __le64 have_version;
        __le16 session_mon;
@@ -191,31 +180,6 @@ struct ceph_mon_statfs_reply {
        struct ceph_statfs st;
 } __attribute__ ((packed));
 
-const char *ceph_pool_op_name(int op);
-
-struct ceph_mon_poolop {
-       struct ceph_mon_request_header monhdr;
-       struct ceph_fsid fsid;
-       __le32 pool;
-       __le32 op;
-       __le64 auid;
-       __le64 snapid;
-       __le32 name_len;
-} __attribute__ ((packed));
-
-struct ceph_mon_poolop_reply {
-       struct ceph_mon_request_header monhdr;
-       struct ceph_fsid fsid;
-       __le32 reply_code;
-       __le32 epoch;
-       char has_data;
-       char data[0];
-} __attribute__ ((packed));
-
-struct ceph_mon_unmanaged_snap {
-       __le64 snapid;
-} __attribute__ ((packed));
-
 struct ceph_osd_getmap {
        struct ceph_mon_request_header monhdr;
        struct ceph_fsid fsid;
@@ -307,6 +271,7 @@ enum {
        CEPH_SESSION_RECALL_STATE,
        CEPH_SESSION_FLUSHMSG,
        CEPH_SESSION_FLUSHMSG_ACK,
+       CEPH_SESSION_FORCE_RO,
 };
 
 extern const char *ceph_session_op_name(int op);
index 8b11a79ca1cbf53630d0546b8a6d50446236d3a9..16fff9608848db88dd2c9bca903bc57ca971f527 100644 (file)
@@ -30,8 +30,9 @@
 #define CEPH_OPT_MYIP             (1<<2) /* specified my ip */
 #define CEPH_OPT_NOCRC            (1<<3) /* no data crc on writes */
 #define CEPH_OPT_NOMSGAUTH       (1<<4) /* not require cephx message signature */
+#define CEPH_OPT_TCP_NODELAY     (1<<5) /* TCP_NODELAY on TCP sockets */
 
-#define CEPH_OPT_DEFAULT   (0)
+#define CEPH_OPT_DEFAULT   (CEPH_OPT_TCP_NODELAY)
 
 #define ceph_set_opt(client, opt) \
        (client)->options->flags |= CEPH_OPT_##opt;
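Illustrative aside, not part of the patch: a libceph user that needs to force the flag on (it is now part of CEPH_OPT_DEFAULT, so this is rarely needed) would go through the ceph_set_opt() helper shown above; the function name is made up.

static void foo_enable_nodelay(struct ceph_client *client)
{
	ceph_set_opt(client, TCP_NODELAY);	/* expands to CEPH_OPT_TCP_NODELAY */
}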
index d9d396c165037a7d6f661a41ec0dccec12f7dd1a..e15499422fdcc7686942ce88389d686d9078cd79 100644 (file)
@@ -57,6 +57,7 @@ struct ceph_messenger {
 
        atomic_t stopping;
        bool nocrc;
+       bool tcp_nodelay;
 
        /*
         * the global_seq counts connections i (attempt to) initiate
@@ -264,7 +265,8 @@ extern void ceph_messenger_init(struct ceph_messenger *msgr,
                        struct ceph_entity_addr *myaddr,
                        u64 supported_features,
                        u64 required_features,
-                       bool nocrc);
+                       bool nocrc,
+                       bool tcp_nodelay);
 
 extern void ceph_con_init(struct ceph_connection *con, void *private,
                        const struct ceph_connection_operations *ops,
index deb47e45ac7c29b11642842a733a351f05bedbb3..81810dc21f061ce1acf5129f5dea5e6f7f758e65 100644 (file)
@@ -40,7 +40,7 @@ struct ceph_mon_request {
 };
 
 /*
- * ceph_mon_generic_request is being used for the statfs, poolop and
+ * ceph_mon_generic_request is being used for the statfs and
  * mon_get_version requests which are being done a bit differently
  * because we need to get data back to the caller
  */
@@ -50,7 +50,6 @@ struct ceph_mon_generic_request {
        struct rb_node node;
        int result;
        void *buf;
-       int buf_len;
        struct completion completion;
        struct ceph_msg *request;  /* original request */
        struct ceph_msg *reply;    /* and reply */
@@ -117,10 +116,4 @@ extern int ceph_monc_open_session(struct ceph_mon_client *monc);
 
 extern int ceph_monc_validate_auth(struct ceph_mon_client *monc);
 
-extern int ceph_monc_create_snapid(struct ceph_mon_client *monc,
-                                  u32 pool, u64 *snapid);
-
-extern int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
-                                  u32 pool, u64 snapid);
-
 #endif
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
deleted file mode 100644 (file)
index 0ca5f60..0000000
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- *  linux/include/linux/clk-private.h
- *
- *  Copyright (c) 2010-2011 Jeremy Kerr <jeremy.kerr@canonical.com>
- *  Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#ifndef __LINUX_CLK_PRIVATE_H
-#define __LINUX_CLK_PRIVATE_H
-
-#include <linux/clk-provider.h>
-#include <linux/kref.h>
-#include <linux/list.h>
-
-/*
- * WARNING: Do not include clk-private.h from any file that implements struct
- * clk_ops.  Doing so is a layering violation!
- *
- * This header exists only to allow for statically initialized clock data.  Any
- * static clock data must be defined in a separate file from the logic that
- * implements the clock operations for that same data.
- */
-
-#ifdef CONFIG_COMMON_CLK
-
-struct module;
-
-struct clk {
-       const char              *name;
-       const struct clk_ops    *ops;
-       struct clk_hw           *hw;
-       struct module           *owner;
-       struct clk              *parent;
-       const char              **parent_names;
-       struct clk              **parents;
-       u8                      num_parents;
-       u8                      new_parent_index;
-       unsigned long           rate;
-       unsigned long           new_rate;
-       struct clk              *new_parent;
-       struct clk              *new_child;
-       unsigned long           flags;
-       unsigned int            enable_count;
-       unsigned int            prepare_count;
-       unsigned long           accuracy;
-       int                     phase;
-       struct hlist_head       children;
-       struct hlist_node       child_node;
-       struct hlist_node       debug_node;
-       unsigned int            notifier_count;
-#ifdef CONFIG_DEBUG_FS
-       struct dentry           *dentry;
-#endif
-       struct kref             ref;
-};
-
-/*
- * DOC: Basic clock implementations common to many platforms
- *
- * Each basic clock hardware type is comprised of a structure describing the
- * clock hardware, implementations of the relevant callbacks in struct clk_ops,
- * unique flags for that hardware type, a registration function and an
- * alternative macro for static initialization
- */
-
-#define DEFINE_CLK(_name, _ops, _flags, _parent_names,         \
-               _parents)                                       \
-       static struct clk _name = {                             \
-               .name = #_name,                                 \
-               .ops = &_ops,                                   \
-               .hw = &_name##_hw.hw,                           \
-               .parent_names = _parent_names,                  \
-               .num_parents = ARRAY_SIZE(_parent_names),       \
-               .parents = _parents,                            \
-               .flags = _flags | CLK_IS_BASIC,                 \
-       }
-
-#define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate,            \
-                               _fixed_rate_flags)              \
-       static struct clk _name;                                \
-       static const char *_name##_parent_names[] = {};         \
-       static struct clk_fixed_rate _name##_hw = {             \
-               .hw = {                                         \
-                       .clk = &_name,                          \
-               },                                              \
-               .fixed_rate = _rate,                            \
-               .flags = _fixed_rate_flags,                     \
-       };                                                      \
-       DEFINE_CLK(_name, clk_fixed_rate_ops, _flags,           \
-                       _name##_parent_names, NULL);
-
-#define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr,      \
-                               _flags, _reg, _bit_idx,         \
-                               _gate_flags, _lock)             \
-       static struct clk _name;                                \
-       static const char *_name##_parent_names[] = {           \
-               _parent_name,                                   \
-       };                                                      \
-       static struct clk *_name##_parents[] = {                \
-               _parent_ptr,                                    \
-       };                                                      \
-       static struct clk_gate _name##_hw = {                   \
-               .hw = {                                         \
-                       .clk = &_name,                          \
-               },                                              \
-               .reg = _reg,                                    \
-               .bit_idx = _bit_idx,                            \
-               .flags = _gate_flags,                           \
-               .lock = _lock,                                  \
-       };                                                      \
-       DEFINE_CLK(_name, clk_gate_ops, _flags,                 \
-                       _name##_parent_names, _name##_parents);
-
-#define _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,  \
-                               _flags, _reg, _shift, _width,   \
-                               _divider_flags, _table, _lock)  \
-       static struct clk _name;                                \
-       static const char *_name##_parent_names[] = {           \
-               _parent_name,                                   \
-       };                                                      \
-       static struct clk *_name##_parents[] = {                \
-               _parent_ptr,                                    \
-       };                                                      \
-       static struct clk_divider _name##_hw = {                \
-               .hw = {                                         \
-                       .clk = &_name,                          \
-               },                                              \
-               .reg = _reg,                                    \
-               .shift = _shift,                                \
-               .width = _width,                                \
-               .flags = _divider_flags,                        \
-               .table = _table,                                \
-               .lock = _lock,                                  \
-       };                                                      \
-       DEFINE_CLK(_name, clk_divider_ops, _flags,              \
-                       _name##_parent_names, _name##_parents);
-
-#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,   \
-                               _flags, _reg, _shift, _width,   \
-                               _divider_flags, _lock)          \
-       _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,   \
-                               _flags, _reg, _shift, _width,   \
-                               _divider_flags, NULL, _lock)
-
-#define DEFINE_CLK_DIVIDER_TABLE(_name, _parent_name,          \
-                               _parent_ptr, _flags, _reg,      \
-                               _shift, _width, _divider_flags, \
-                               _table, _lock)                  \
-       _DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr,   \
-                               _flags, _reg, _shift, _width,   \
-                               _divider_flags, _table, _lock)  \
-
-#define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags, \
-                               _reg, _shift, _width,           \
-                               _mux_flags, _lock)              \
-       static struct clk _name;                                \
-       static struct clk_mux _name##_hw = {                    \
-               .hw = {                                         \
-                       .clk = &_name,                          \
-               },                                              \
-               .reg = _reg,                                    \
-               .shift = _shift,                                \
-               .mask = BIT(_width) - 1,                        \
-               .flags = _mux_flags,                            \
-               .lock = _lock,                                  \
-       };                                                      \
-       DEFINE_CLK(_name, clk_mux_ops, _flags, _parent_names,   \
-                       _parents);
-
-#define DEFINE_CLK_FIXED_FACTOR(_name, _parent_name,           \
-                               _parent_ptr, _flags,            \
-                               _mult, _div)                    \
-       static struct clk _name;                                \
-       static const char *_name##_parent_names[] = {           \
-               _parent_name,                                   \
-       };                                                      \
-       static struct clk *_name##_parents[] = {                \
-               _parent_ptr,                                    \
-       };                                                      \
-       static struct clk_fixed_factor _name##_hw = {           \
-               .hw = {                                         \
-                       .clk = &_name,                          \
-               },                                              \
-               .mult = _mult,                                  \
-               .div = _div,                                    \
-       };                                                      \
-       DEFINE_CLK(_name, clk_fixed_factor_ops, _flags,         \
-                       _name##_parent_names, _name##_parents);
-
-/**
- * __clk_init - initialize the data structures in a struct clk
- * @dev:       device initializing this clk, placeholder for now
- * @clk:       clk being initialized
- *
- * Initializes the lists in struct clk, queries the hardware for the
- * parent and rate and sets them both.
- *
- * Any struct clk passed into __clk_init must have the following members
- * populated:
- *     .name
- *     .ops
- *     .hw
- *     .parent_names
- *     .num_parents
- *     .flags
- *
- * It is not necessary to call clk_register if __clk_init is used directly with
- * statically initialized clock data.
- *
- * Returns 0 on success, otherwise an error code.
- */
-int __clk_init(struct device *dev, struct clk *clk);
-
-struct clk *__clk_register(struct device *dev, struct clk_hw *hw);
-
-#endif /* CONFIG_COMMON_CLK */
-#endif /* CLK_PRIVATE_H */
index d936409520f8db609994f7ddab629a99981883dc..5591ea71a8d14054bf923dcad45dc94f71bd029a 100644 (file)
@@ -33,6 +33,7 @@
 #define CLK_GET_ACCURACY_NOCACHE BIT(8) /* do not use the cached clk accuracy */
 
 struct clk_hw;
+struct clk_core;
 struct dentry;
 
 /**
@@ -174,9 +175,12 @@ struct clk_ops {
                                        unsigned long parent_rate);
        long            (*round_rate)(struct clk_hw *hw, unsigned long rate,
                                        unsigned long *parent_rate);
-       long            (*determine_rate)(struct clk_hw *hw, unsigned long rate,
-                                       unsigned long *best_parent_rate,
-                                       struct clk_hw **best_parent_hw);
+       long            (*determine_rate)(struct clk_hw *hw,
+                                         unsigned long rate,
+                                         unsigned long min_rate,
+                                         unsigned long max_rate,
+                                         unsigned long *best_parent_rate,
+                                         struct clk_hw **best_parent_hw);
        int             (*set_parent)(struct clk_hw *hw, u8 index);
        u8              (*get_parent)(struct clk_hw *hw);
        int             (*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -216,13 +220,17 @@ struct clk_init_data {
  * clk_foo and then referenced by the struct clk instance that uses struct
  * clk_foo's clk_ops
  *
- * @clk: pointer to the struct clk instance that points back to this struct
- * clk_hw instance
+ * @core: pointer to the struct clk_core instance that points back to this
+ * struct clk_hw instance
+ *
+ * @clk: pointer to the per-user struct clk instance that can be used to call
+ * into the clk API
  *
  * @init: pointer to struct clk_init_data that contains the init data shared
  * with the common clock framework.
  */
 struct clk_hw {
+       struct clk_core *core;
        struct clk *clk;
        const struct clk_init_data *init;
 };
@@ -294,6 +302,7 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 bit_idx,
                u8 clk_gate_flags, spinlock_t *lock);
+void clk_unregister_gate(struct clk *clk);
 
 struct clk_div_table {
        unsigned int    val;
@@ -352,6 +361,17 @@ struct clk_divider {
 #define CLK_DIVIDER_READ_ONLY          BIT(5)
 
 extern const struct clk_ops clk_divider_ops;
+
+unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate,
+               unsigned int val, const struct clk_div_table *table,
+               unsigned long flags);
+long divider_round_rate(struct clk_hw *hw, unsigned long rate,
+               unsigned long *prate, const struct clk_div_table *table,
+               u8 width, unsigned long flags);
+int divider_get_val(unsigned long rate, unsigned long parent_rate,
+               const struct clk_div_table *table, u8 width,
+               unsigned long flags);
+
 struct clk *clk_register_divider(struct device *dev, const char *name,
                const char *parent_name, unsigned long flags,
                void __iomem *reg, u8 shift, u8 width,
@@ -361,6 +381,7 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
                void __iomem *reg, u8 shift, u8 width,
                u8 clk_divider_flags, const struct clk_div_table *table,
                spinlock_t *lock);
+void clk_unregister_divider(struct clk *clk);
 
 /**
  * struct clk_mux - multiplexer clock
@@ -382,6 +403,8 @@ struct clk *clk_register_divider_table(struct device *dev, const char *name,
  *     register, and mask of mux bits are in higher 16-bit of this register.
  *     While setting the mux bits, higher 16-bit should also be updated to
  *     indicate changing mux bits.
+ * CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired
+ *     frequency.
  */
 struct clk_mux {
        struct clk_hw   hw;
@@ -396,7 +419,8 @@ struct clk_mux {
 #define CLK_MUX_INDEX_ONE              BIT(0)
 #define CLK_MUX_INDEX_BIT              BIT(1)
 #define CLK_MUX_HIWORD_MASK            BIT(2)
-#define CLK_MUX_READ_ONLY      BIT(3) /* mux setting cannot be changed */
+#define CLK_MUX_READ_ONLY              BIT(3) /* mux can't be changed */
+#define CLK_MUX_ROUND_CLOSEST          BIT(4)
 
 extern const struct clk_ops clk_mux_ops;
 extern const struct clk_ops clk_mux_ro_ops;
@@ -411,6 +435,8 @@ struct clk *clk_register_mux_table(struct device *dev, const char *name,
                void __iomem *reg, u8 shift, u32 mask,
                u8 clk_mux_flags, u32 *table, spinlock_t *lock);
 
+void clk_unregister_mux(struct clk *clk);
+
 void of_fixed_factor_clk_setup(struct device_node *node);
 
 /**
@@ -550,15 +576,29 @@ bool __clk_is_prepared(struct clk *clk);
 bool __clk_is_enabled(struct clk *clk);
 struct clk *__clk_lookup(const char *name);
 long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
                              unsigned long *best_parent_rate,
                              struct clk_hw **best_parent_p);
+unsigned long __clk_determine_rate(struct clk_hw *core,
+                                  unsigned long rate,
+                                  unsigned long min_rate,
+                                  unsigned long max_rate);
+long __clk_mux_determine_rate_closest(struct clk_hw *hw, unsigned long rate,
+                             unsigned long min_rate,
+                             unsigned long max_rate,
+                             unsigned long *best_parent_rate,
+                             struct clk_hw **best_parent_p);
+
+static inline void __clk_hw_set_clk(struct clk_hw *dst, struct clk_hw *src)
+{
+       dst->clk = src->clk;
+       dst->core = src->core;
+}
 
 /*
  * FIXME clock api without lock protection
  */
-int __clk_prepare(struct clk *clk);
-void __clk_unprepare(struct clk *clk);
-void __clk_reparent(struct clk *clk, struct clk *new_parent);
 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate);
 
 struct of_device_id;
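Illustrative aside, not part of the patch: a minimal provider-side .determine_rate callback under the widened signature above, which keeps the current parent and simply clamps the request into the new [min_rate, max_rate] window; all names are made up.

static long foo_determine_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long min_rate,
			       unsigned long max_rate,
			       unsigned long *best_parent_rate,
			       struct clk_hw **best_parent_hw)
{
	/* keep the current parent; the core pre-loads *best_parent_rate */
	if (rate < min_rate)
		rate = min_rate;
	if (rate > max_rate)
		rate = max_rate;

	return rate;
}

static const struct clk_ops foo_clk_ops = {
	.determine_rate	= foo_determine_rate,
	/* .recalc_rate, .set_rate etc. omitted from this sketch */
};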
index c7f258a81761d22b1b204654d3582af55044915a..8381bbfbc3085bcde157c02ea7234826f757a1e8 100644 (file)
@@ -301,6 +301,46 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
  */
 int clk_set_rate(struct clk *clk, unsigned long rate);
 
+/**
+ * clk_has_parent - check if a clock is a possible parent for another
+ * @clk: clock source
+ * @parent: parent clock source
+ *
+ * This function can be used in drivers that need to check that a clock can be
+ * the parent of another without actually changing the parent.
+ *
+ * Returns true if @parent is a possible parent for @clk, false otherwise.
+ */
+bool clk_has_parent(struct clk *clk, struct clk *parent);
+
+/**
+ * clk_set_rate_range - set a rate range for a clock source
+ * @clk: clock source
+ * @min: desired minimum clock rate in Hz, inclusive
+ * @max: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max);
+
+/**
+ * clk_set_min_rate - set a minimum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired minimum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_min_rate(struct clk *clk, unsigned long rate);
+
+/**
+ * clk_set_max_rate - set a maximum clock rate for a clock source
+ * @clk: clock source
+ * @rate: desired maximum clock rate in Hz, inclusive
+ *
+ * Returns success (0) or negative errno.
+ */
+int clk_set_max_rate(struct clk *clk, unsigned long rate);
+
 /**
  * clk_set_parent - set the parent clock source for this clock
  * @clk: clock source
@@ -374,6 +414,11 @@ static inline long clk_round_rate(struct clk *clk, unsigned long rate)
        return 0;
 }
 
+static inline bool clk_has_parent(struct clk *clk, struct clk *parent)
+{
+       return true;
+}
+
 static inline int clk_set_parent(struct clk *clk, struct clk *parent)
 {
        return 0;
diff --git a/include/linux/clk/sunxi.h b/include/linux/clk/sunxi.h
deleted file mode 100644 (file)
index aed28c4..0000000
+++ /dev/null
@@ -1,22 +0,0 @@
-/*
- * Copyright 2013 - Hans de Goede <hdegoede@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- */
-
-#ifndef __LINUX_CLK_SUNXI_H_
-#define __LINUX_CLK_SUNXI_H_
-
-#include <linux/clk.h>
-
-void clk_sunxi_mmc_phase_control(struct clk *clk, u8 sample, u8 output);
-
-#endif
index 3ca9fca827a2f1299ed7eea5ca9ec908e57f2fc8..19c4208f4752fd6957712b8a9e900a6baa0b4945 100644 (file)
@@ -120,6 +120,4 @@ static inline void tegra_cpu_clock_resume(void)
 }
 #endif
 
-void tegra_clocks_apply_init_table(void);
-
 #endif /* __LINUX_CLK_TEGRA_H_ */
index 55ef529a0dbf905995781bd051bf926396b837c5..67844003493de5936809dc1acac72cd88b5bc521 100644 (file)
@@ -15,6 +15,7 @@
 #ifndef __LINUX_CLK_TI_H__
 #define __LINUX_CLK_TI_H__
 
+#include <linux/clk-provider.h>
 #include <linux/clkdev.h>
 
 /**
@@ -217,6 +218,13 @@ struct ti_dt_clk {
 /* Maximum number of clock memmaps */
 #define CLK_MAX_MEMMAPS                        4
 
+/* Static memmap indices */
+enum {
+       TI_CLKM_CM = 0,
+       TI_CLKM_PRM,
+       TI_CLKM_SCRM,
+};
+
 typedef void (*ti_of_clk_init_cb_t)(struct clk_hw *, struct device_node *);
 
 /**
@@ -263,6 +271,8 @@ int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
                                           u8 index);
 long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
                                       unsigned long rate,
+                                      unsigned long min_rate,
+                                      unsigned long max_rate,
                                       unsigned long *best_parent_rate,
                                       struct clk_hw **best_parent_clk);
 unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
@@ -272,6 +282,8 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
                                    unsigned long *parent_rate);
 long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
                                        unsigned long rate,
+                                       unsigned long min_rate,
+                                       unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk);
 u8 omap2_init_dpll_parent(struct clk_hw *hw);
@@ -348,4 +360,17 @@ extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_ssi_wait;
 extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_dss_usbhost_wait;
 extern const struct clk_hw_omap_ops clkhwops_omap3430es2_iclk_hsotgusb_wait;
 
+#ifdef CONFIG_ATAGS
+int omap3430_clk_legacy_init(void);
+int omap3430es1_clk_legacy_init(void);
+int omap36xx_clk_legacy_init(void);
+int am35xx_clk_legacy_init(void);
+#else
+static inline int omap3430_clk_legacy_init(void) { return -ENXIO; }
+static inline int omap3430es1_clk_legacy_init(void) { return -ENXIO; }
+static inline int omap36xx_clk_legacy_init(void) { return -ENXIO; }
+static inline int am35xx_clk_legacy_init(void) { return -ENXIO; }
+#endif
+
+
 #endif
index d1ec10a940ffffb01a94bcedf9d113e1940619c7..1b45e4a0519b2c34033db91e37fd1f1f0b367f8c 100644 (file)
@@ -202,7 +202,7 @@ static __always_inline void data_access_exceeds_word_size(void)
 {
 }
 
-static __always_inline void __read_once_size(volatile void *p, void *res, int size)
+static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
 {
        switch (size) {
        case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
@@ -259,10 +259,10 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  */
 
 #define READ_ONCE(x) \
-       ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
+       ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
 
 #define WRITE_ONCE(x, val) \
-       ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
+       ({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
 
 #endif /* __KERNEL__ */
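Illustrative aside, not part of the patch: the rewritten macros still back the usual lockless publish/consume pattern; the structure and function names are made up.

struct foo_state {
	int data;
	int ready;
};

static void foo_publish(struct foo_state *s, int v)
{
	s->data = v;
	smp_wmb();			/* order the data before the flag */
	WRITE_ONCE(s->ready, 1);	/* single, non-torn store */
}

static int foo_consume(struct foo_state *s)
{
	if (!READ_ONCE(s->ready))	/* single, non-torn load */
		return -EAGAIN;
	smp_rmb();			/* pairs with the smp_wmb() above */
	return s->data;
}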
 
index 92c08cf7670e2afa128a1a15cd454b7f2398fbbf..d8358799c59411f5b715e9dfbb139b94b8dbca8f 100644 (file)
@@ -215,13 +215,16 @@ struct dentry_operations {
 #define DCACHE_LRU_LIST                        0x00080000
 
 #define DCACHE_ENTRY_TYPE              0x00700000
-#define DCACHE_MISS_TYPE               0x00000000 /* Negative dentry */
-#define DCACHE_DIRECTORY_TYPE          0x00100000 /* Normal directory */
-#define DCACHE_AUTODIR_TYPE            0x00200000 /* Lookupless directory (presumed automount) */
-#define DCACHE_SYMLINK_TYPE            0x00300000 /* Symlink */
-#define DCACHE_FILE_TYPE               0x00400000 /* Other file type */
+#define DCACHE_MISS_TYPE               0x00000000 /* Negative dentry (maybe fallthru to nowhere) */
+#define DCACHE_WHITEOUT_TYPE           0x00100000 /* Whiteout dentry (stop pathwalk) */
+#define DCACHE_DIRECTORY_TYPE          0x00200000 /* Normal directory */
+#define DCACHE_AUTODIR_TYPE            0x00300000 /* Lookupless directory (presumed automount) */
+#define DCACHE_REGULAR_TYPE            0x00400000 /* Regular file type (or fallthru to such) */
+#define DCACHE_SPECIAL_TYPE            0x00500000 /* Other file type (or fallthru to such) */
+#define DCACHE_SYMLINK_TYPE            0x00600000 /* Symlink (or fallthru to such) */
 
 #define DCACHE_MAY_FREE                        0x00800000
+#define DCACHE_FALLTHRU                        0x01000000 /* Fall through to lower layer */
 
 extern seqlock_t rename_lock;
 
@@ -423,6 +426,16 @@ static inline unsigned __d_entry_type(const struct dentry *dentry)
        return dentry->d_flags & DCACHE_ENTRY_TYPE;
 }
 
+static inline bool d_is_miss(const struct dentry *dentry)
+{
+       return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
+}
+
+static inline bool d_is_whiteout(const struct dentry *dentry)
+{
+       return __d_entry_type(dentry) == DCACHE_WHITEOUT_TYPE;
+}
+
 static inline bool d_can_lookup(const struct dentry *dentry)
 {
        return __d_entry_type(dentry) == DCACHE_DIRECTORY_TYPE;
@@ -443,14 +456,25 @@ static inline bool d_is_symlink(const struct dentry *dentry)
        return __d_entry_type(dentry) == DCACHE_SYMLINK_TYPE;
 }
 
+static inline bool d_is_reg(const struct dentry *dentry)
+{
+       return __d_entry_type(dentry) == DCACHE_REGULAR_TYPE;
+}
+
+static inline bool d_is_special(const struct dentry *dentry)
+{
+       return __d_entry_type(dentry) == DCACHE_SPECIAL_TYPE;
+}
+
 static inline bool d_is_file(const struct dentry *dentry)
 {
-       return __d_entry_type(dentry) == DCACHE_FILE_TYPE;
+       return d_is_reg(dentry) || d_is_special(dentry);
 }
 
 static inline bool d_is_negative(const struct dentry *dentry)
 {
-       return __d_entry_type(dentry) == DCACHE_MISS_TYPE;
+       // TODO: check d_is_whiteout(dentry) also.
+       return d_is_miss(dentry);
 }
 
 static inline bool d_is_positive(const struct dentry *dentry)
@@ -458,10 +482,75 @@ static inline bool d_is_positive(const struct dentry *dentry)
        return !d_is_negative(dentry);
 }
 
+extern void d_set_fallthru(struct dentry *dentry);
+
+static inline bool d_is_fallthru(const struct dentry *dentry)
+{
+       return dentry->d_flags & DCACHE_FALLTHRU;
+}
+
+
 extern int sysctl_vfs_cache_pressure;
 
 static inline unsigned long vfs_pressure_ratio(unsigned long val)
 {
        return mult_frac(val, sysctl_vfs_cache_pressure, 100);
 }
+
+/**
+ * d_inode - Get the actual inode of this dentry
+ * @dentry: The dentry to query
+ *
+ * This is the helper normal filesystems should use to get at their own inodes
+ * in their own dentries and ignore the layering superimposed upon them.
+ */
+static inline struct inode *d_inode(const struct dentry *dentry)
+{
+       return dentry->d_inode;
+}
+
+/**
+ * d_inode_rcu - Get the actual inode of this dentry with ACCESS_ONCE()
+ * @dentry: The dentry to query
+ *
+ * This is the helper normal filesystems should use to get at their own inodes
+ * in their own dentries and ignore the layering superimposed upon them.
+ */
+static inline struct inode *d_inode_rcu(const struct dentry *dentry)
+{
+       return ACCESS_ONCE(dentry->d_inode);
+}
+
+/**
+ * d_backing_inode - Get upper or lower inode we should be using
+ * @upper: The upper layer
+ *
+ * This is the helper that should be used to get at the inode that will be used
+ * if this dentry were to be opened as a file.  The inode may be on the upper
+ * dentry or it may be on a lower dentry pinned by the upper.
+ *
+ * Normal filesystems should not use this to access their own inodes.
+ */
+static inline struct inode *d_backing_inode(const struct dentry *upper)
+{
+       struct inode *inode = upper->d_inode;
+
+       return inode;
+}
+
+/**
+ * d_backing_dentry - Get upper or lower dentry we should be using
+ * @upper: The upper layer
+ *
+ * This is the helper that should be used to get the dentry of the inode that
+ * will be used if this dentry were opened as a file.  It may be the upper
+ * dentry or it may be a lower dentry pinned by the upper.
+ *
+ * Normal filesystems should not use this to access their own dentries.
+ */
+static inline struct dentry *d_backing_dentry(struct dentry *upper)
+{
+       return upper;
+}
+
 #endif /* __LINUX_DCACHE_H */
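Illustrative aside, not part of the patch: how filesystem code would lean on the new dentry-type helpers rather than open-coding d_flags checks; the function name is made up.

static struct inode *foo_pick_inode(struct dentry *dentry)
{
	/* nothing usable behind negative entries or whiteouts */
	if (d_is_negative(dentry) || d_is_whiteout(dentry))
		return NULL;

	/*
	 * Union/overlay-aware callers use d_backing_inode(); a filesystem
	 * looking at one of its own dentries would use d_inode() instead.
	 */
	return d_backing_inode(dentry);
}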
index 51f7ccadf923c337ddb5627491a958c5e74fa610..4173a8fdad9efd052870b8738547ac1fa1962526 100644 (file)
@@ -33,6 +33,8 @@
  * @units:             Measurement unit for this attribute.
  * @unit_expo:         Exponent used in the data.
  * @size:              Size in bytes for data size.
+ * @logical_minimum:   Logical minimum value for this attribute.
+ * @logical_maximum:   Logical maximum value for this attribute.
  */
 struct hid_sensor_hub_attribute_info {
        u32 usage_id;
@@ -146,6 +148,7 @@ int sensor_hub_input_get_attribute_info(struct hid_sensor_hub_device *hsdev,
 
 /**
 * sensor_hub_input_attr_get_raw_value() - Synchronous read request
+* @hsdev:      Hub device instance.
 * @usage_id:   Attribute usage id of parent physical device as per spec
 * @attr_usage_id:      Attribute usage id as per spec
 * @report_id:  Report id to look for
@@ -160,6 +163,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
                        u32 attr_usage_id, u32 report_id);
 /**
 * sensor_hub_set_feature() - Feature set request
+* @hsdev:      Hub device instance.
 * @report_id:  Report id to look for
 * @field_index:        Field index inside a report
 * @value:      Value to set
@@ -172,6 +176,7 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
 
 /**
 * sensor_hub_get_feature() - Feature get request
+* @hsdev:      Hub device instance.
 * @report_id:  Report id to look for
 * @field_index:        Field index inside a report
 * @value:      Place holder for return value
index 7c7695940dddeae9d3d22129ce4a14eaf70e1a5e..f17da50402a4dad6bf4d4aa907a32e8df1d7dea7 100644 (file)
@@ -130,8 +130,6 @@ extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
  * @probe: Callback for device binding
  * @remove: Callback for device unbinding
  * @shutdown: Callback for device shutdown
- * @suspend: Callback for device suspend
- * @resume: Callback for device resume
  * @alert: Alert callback, for example for the SMBus alert protocol
  * @command: Callback for bus-wide signaling (optional)
  * @driver: Device driver model driver
@@ -174,8 +172,6 @@ struct i2c_driver {
 
        /* driver model interfaces that don't relate to enumeration  */
        void (*shutdown)(struct i2c_client *);
-       int (*suspend)(struct i2c_client *, pm_message_t mesg);
-       int (*resume)(struct i2c_client *);
 
        /* Alert callback, for example for the SMBus alert protocol.
         * The format and meaning of the data value depends on the protocol.
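Illustrative aside, not part of the patch: with the i2c-specific suspend/resume hooks gone, a driver that used them is expected to move to generic dev_pm_ops on the embedded device_driver, roughly as sketched below; all names are made up.

static int foo_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "suspend\n");	/* placeholder for real quiesce code */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);

	dev_dbg(&client->dev, "resume\n");	/* placeholder for real re-init code */
	return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct i2c_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,
	},
	/* .probe, .remove, .id_table omitted from this sketch */
};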
index 420f77b34d02639192bae47ae2fcc95462eaf8e6..e6a6aac451db4614df9cc4e58911f9ff6629620d 100644 (file)
@@ -243,7 +243,6 @@ extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
 extern void gic_send_ipi(unsigned int intr);
 extern unsigned int plat_ipi_call_int_xlate(unsigned int);
 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
-extern unsigned int gic_get_timer_pending(void);
 extern int gic_get_c0_compare_int(void);
 extern int gic_get_c0_perfcount_int(void);
 #endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
index 75ae2e2631fceaa27915f3d100b1f03244b17500..a19bcf9e762e1d4886e8bd5ed202e4485ba8cf3e 100644 (file)
@@ -156,8 +156,14 @@ typedef enum {
        KDB_REASON_SYSTEM_NMI,  /* In NMI due to SYSTEM cmd; regs valid */
 } kdb_reason_t;
 
+enum kdb_msgsrc {
+       KDB_MSGSRC_INTERNAL, /* direct call to kdb_printf() */
+       KDB_MSGSRC_PRINTK, /* trapped from printk() */
+};
+
 extern int kdb_trap_printk;
-extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args);
+extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt,
+                                     va_list args);
 extern __printf(1, 2) int kdb_printf(const char *, ...);
 typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
 
index 6d627b92df537ada3886af5850c0fabbac53b66a..2f77e0c651c89874a641c8a04a723aaf60dd2837 100644 (file)
@@ -180,7 +180,6 @@ struct nfs_inode {
         /* NFSv4 state */
        struct list_head        open_states;
        struct nfs_delegation __rcu *delegation;
-       fmode_t                  delegation_state;
        struct rw_semaphore     rwsem;
 
        /* pNFS layout information */
index 38d96ba935c2d7fd91aaf444a598b832b1c0ee4e..4cb3eaa89cf708a57038049db0df75144155d920 100644 (file)
@@ -1167,8 +1167,15 @@ struct nfs41_impl_id {
        struct nfstime4                 date;
 };
 
+struct nfs41_bind_conn_to_session_args {
+       struct nfs_client               *client;
+       struct nfs4_sessionid           sessionid;
+       u32                             dir;
+       bool                            use_conn_in_rdma_mode;
+};
+
 struct nfs41_bind_conn_to_session_res {
-       struct nfs4_session             *session;
+       struct nfs4_sessionid           sessionid;
        u32                             dir;
        bool                            use_conn_in_rdma_mode;
 };
@@ -1185,6 +1192,8 @@ struct nfs41_exchange_id_res {
 
 struct nfs41_create_session_args {
        struct nfs_client              *client;
+       u64                             clientid;
+       uint32_t                        seqid;
        uint32_t                        flags;
        uint32_t                        cb_program;
        struct nfs4_channel_attrs       fc_attrs;       /* Fore Channel */
@@ -1192,7 +1201,11 @@ struct nfs41_create_session_args {
 };
 
 struct nfs41_create_session_res {
-       struct nfs_client              *client;
+       struct nfs4_sessionid           sessionid;
+       uint32_t                        seqid;
+       uint32_t                        flags;
+       struct nfs4_channel_attrs       fc_attrs;       /* Fore Channel */
+       struct nfs4_channel_attrs       bc_attrs;       /* Back Channel */
 };
 
 struct nfs41_reclaim_complete_args {
@@ -1351,7 +1364,7 @@ struct nfs_commit_completion_ops {
 };
 
 struct nfs_commit_info {
-       spinlock_t                      *lock;
+       spinlock_t                      *lock;  /* inode->i_lock */
        struct nfs_mds_commit_info      *mds;
        struct pnfs_ds_commit_info      *ds;
        struct nfs_direct_req           *dreq;  /* O_DIRECT request */
index 19a5d4b23209302bc55cce74c12f69cbd91f260d..0adad4a5419b7cbd7560422de51f9abc97a755a0 100644 (file)
@@ -17,7 +17,6 @@
 
 #include <uapi/linux/nvme.h>
 #include <linux/pci.h>
-#include <linux/miscdevice.h>
 #include <linux/kref.h>
 #include <linux/blk-mq.h>
 
@@ -62,8 +61,6 @@ enum {
        NVME_CSTS_SHST_MASK     = 3 << 2,
 };
 
-#define NVME_VS(major, minor)  (major << 16 | minor)
-
 extern unsigned char nvme_io_timeout;
 #define NVME_IO_TIMEOUT        (nvme_io_timeout * HZ)
 
@@ -91,9 +88,10 @@ struct nvme_dev {
        struct nvme_bar __iomem *bar;
        struct list_head namespaces;
        struct kref kref;
-       struct miscdevice miscdev;
+       struct device *device;
        work_func_t reset_workfn;
        struct work_struct reset_work;
+       struct work_struct probe_work;
        char name[12];
        char serial[20];
        char model[40];
@@ -105,7 +103,6 @@ struct nvme_dev {
        u16 abort_limit;
        u8 event_limit;
        u8 vwc;
-       u8 initialized;
 };
 
 /*
@@ -121,6 +118,7 @@ struct nvme_ns {
        unsigned ns_id;
        int lba_shift;
        int ms;
+       int pi_type;
        u64 mode_select_num_blocks;
        u32 mode_select_block_len;
 };
@@ -138,6 +136,7 @@ struct nvme_iod {
        int nents;              /* Used in scatterlist */
        int length;             /* Of data, in bytes */
        dma_addr_t first_dma;
+       struct scatterlist meta_sg[1]; /* metadata requires single contiguous buffer */
        struct scatterlist sg[0];
 };
 
diff --git a/include/linux/platform_data/bfin_rotary.h b/include/linux/platform_data/bfin_rotary.h
new file mode 100644 (file)
index 0000000..9882937
--- /dev/null
@@ -0,0 +1,117 @@
+/*
+ * Board initialization should put one of these structures into platform_data
+ * and register the "bfin-rotary" device on the platform bus.
+ *
+ * Copyright 2008-2010 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#ifndef _BFIN_ROTARY_H
+#define _BFIN_ROTARY_H
+
+/* mode bitmasks */
+#define ROT_QUAD_ENC   CNTMODE_QUADENC /* quadrature/Gray code encoder mode */
+#define ROT_BIN_ENC    CNTMODE_BINENC  /* binary encoder mode */
+#define ROT_UD_CNT     CNTMODE_UDCNT   /* rotary counter mode */
+#define ROT_DIR_CNT    CNTMODE_DIRCNT  /* direction counter mode */
+
+#define ROT_DEBE       DEBE            /* Debounce Enable */
+
+#define ROT_CDGINV     CDGINV          /* CDG Pin Polarity Invert */
+#define ROT_CUDINV     CUDINV          /* CUD Pin Polarity Invert */
+#define ROT_CZMINV     CZMINV          /* CZM Pin Polarity Invert */
+
+struct bfin_rotary_platform_data {
+       /* set rotary UP KEY_### or BTN_### in case you prefer
+        * bfin-rotary to send EV_KEY, otherwise set 0
+        */
+       unsigned int rotary_up_key;
+       /* set rotary DOWN KEY_### or BTN_### in case you prefer
+        * bfin-rotary to send EV_KEY, otherwise set 0
+        */
+       unsigned int rotary_down_key;
+       /* set rotary BUTTON KEY_### or BTN_### */
+       unsigned int rotary_button_key;
+       /* set rotary Relative Axis REL_### in case you prefer
+        * bfin-rotary to send EV_REL, otherwise set 0
+        */
+       unsigned int rotary_rel_code;
+       unsigned short debounce;        /* 0..17 */
+       unsigned short mode;
+       unsigned short pm_wakeup;
+       unsigned short *pin_list;
+};
+
+/* CNT_CONFIG bitmasks */
+#define CNTE           (1 << 0)        /* Counter Enable */
+#define DEBE           (1 << 1)        /* Debounce Enable */
+#define CDGINV         (1 << 4)        /* CDG Pin Polarity Invert */
+#define CUDINV         (1 << 5)        /* CUD Pin Polarity Invert */
+#define CZMINV         (1 << 6)        /* CZM Pin Polarity Invert */
+#define CNTMODE_SHIFT  8
+#define CNTMODE                (0x7 << CNTMODE_SHIFT)  /* Counter Operating Mode */
+#define ZMZC           (1 << 1)        /* CZM Zeroes Counter Enable */
+#define BNDMODE_SHIFT  12
+#define BNDMODE                (0x3 << BNDMODE_SHIFT)  /* Boundary register Mode */
+#define INPDIS         (1 << 15)       /* CUG and CDG Input Disable */
+
+#define CNTMODE_QUADENC        (0 << CNTMODE_SHIFT)    /* quadrature encoder mode */
+#define CNTMODE_BINENC (1 << CNTMODE_SHIFT)    /* binary encoder mode */
+#define CNTMODE_UDCNT  (2 << CNTMODE_SHIFT)    /* up/down counter mode */
+#define CNTMODE_DIRCNT (4 << CNTMODE_SHIFT)    /* direction counter mode */
+#define CNTMODE_DIRTMR (5 << CNTMODE_SHIFT)    /* direction timer mode */
+
+#define BNDMODE_COMP   (0 << BNDMODE_SHIFT)    /* boundary compare mode */
+#define BNDMODE_ZERO   (1 << BNDMODE_SHIFT)    /* boundary compare and zero mode */
+#define BNDMODE_CAPT   (2 << BNDMODE_SHIFT)    /* boundary capture mode */
+#define BNDMODE_AEXT   (3 << BNDMODE_SHIFT)    /* boundary auto-extend mode */
+
+/* CNT_IMASK bitmasks */
+#define ICIE           (1 << 0)        /* Illegal Gray/Binary Code Interrupt Enable */
+#define UCIE           (1 << 1)        /* Up count Interrupt Enable */
+#define DCIE           (1 << 2)        /* Down count Interrupt Enable */
+#define MINCIE         (1 << 3)        /* Min Count Interrupt Enable */
+#define MAXCIE         (1 << 4)        /* Max Count Interrupt Enable */
+#define COV31IE                (1 << 5)        /* Bit 31 Overflow Interrupt Enable */
+#define COV15IE                (1 << 6)        /* Bit 15 Overflow Interrupt Enable */
+#define CZEROIE                (1 << 7)        /* Count to Zero Interrupt Enable */
+#define CZMIE          (1 << 8)        /* CZM Pin Interrupt Enable */
+#define CZMEIE         (1 << 9)        /* CZM Error Interrupt Enable */
+#define CZMZIE         (1 << 10)       /* CZM Zeroes Counter Interrupt Enable */
+
+/* CNT_STATUS bitmasks */
+#define ICII           (1 << 0)        /* Illegal Gray/Binary Code Interrupt Identifier */
+#define UCII           (1 << 1)        /* Up count Interrupt Identifier */
+#define DCII           (1 << 2)        /* Down count Interrupt Identifier */
+#define MINCII         (1 << 3)        /* Min Count Interrupt Identifier */
+#define MAXCII         (1 << 4)        /* Max Count Interrupt Identifier */
+#define COV31II                (1 << 5)        /* Bit 31 Overflow Interrupt Identifier */
+#define COV15II                (1 << 6)        /* Bit 15 Overflow Interrupt Identifier */
+#define CZEROII                (1 << 7)        /* Count to Zero Interrupt Identifier */
+#define CZMII          (1 << 8)        /* CZM Pin Interrupt Identifier */
+#define CZMEII         (1 << 9)        /* CZM Error Interrupt Identifier */
+#define CZMZII         (1 << 10)       /* CZM Zeroes Counter Interrupt Identifier */
+
+/* CNT_COMMAND bitmasks */
+#define W1LCNT         0xf             /* Load Counter Register */
+#define W1LMIN         0xf0            /* Load Min Register */
+#define W1LMAX         0xf00           /* Load Max Register */
+#define W1ZMONCE       (1 << 12)       /* Enable CZM Clear Counter Once */
+
+#define W1LCNT_ZERO    (1 << 0)        /* write 1 to load CNT_COUNTER with zero */
+#define W1LCNT_MIN     (1 << 2)        /* write 1 to load CNT_COUNTER from CNT_MIN */
+#define W1LCNT_MAX     (1 << 3)        /* write 1 to load CNT_COUNTER from CNT_MAX */
+
+#define W1LMIN_ZERO    (1 << 4)        /* write 1 to load CNT_MIN with zero */
+#define W1LMIN_CNT     (1 << 5)        /* write 1 to load CNT_MIN from CNT_COUNTER */
+#define W1LMIN_MAX     (1 << 7)        /* write 1 to load CNT_MIN from CNT_MAX */
+
+#define W1LMAX_ZERO    (1 << 8)        /* write 1 to load CNT_MAX with zero */
+#define W1LMAX_CNT     (1 << 9)        /* write 1 to load CNT_MAX from CNT_COUNTER */
+#define W1LMAX_MIN     (1 << 10)       /* write 1 to load CNT_MAX from CNT_MIN */
+
+/* CNT_DEBOUNCE bitmasks */
+#define DPRESCALE      0xf             /* Debounce prescale */
+
+#endif
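[Editor's sketch, not part of the patch] Per the comment at the top of this new header, a board file supplies one of these structures as platform_data of a platform device named "bfin-rotary". A minimal example follows; the key codes, debounce value and device id are illustrative assumptions only.

#include <linux/input.h>
#include <linux/platform_device.h>
#include <linux/platform_data/bfin_rotary.h>

static struct bfin_rotary_platform_data example_rotary_pdata = {
	.rotary_up_key     = KEY_UP,	/* send EV_KEY for clockwise steps */
	.rotary_down_key   = KEY_DOWN,	/* send EV_KEY for counter-clockwise steps */
	.rotary_button_key = KEY_ENTER,
	.rotary_rel_code   = 0,		/* 0: do not send EV_REL events */
	.debounce          = 10,	/* valid range is 0..17 */
	.mode              = ROT_QUAD_ENC | ROT_DEBE,
};

static struct platform_device example_rotary_device = {
	.name = "bfin-rotary",
	.id   = -1,
	.dev  = { .platform_data = &example_rotary_pdata },
};

/* the board init code would then register example_rotary_device,
 * e.g. via platform_device_register() */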
index 41c60e5302d703525c1d99f259de9297ea1b36fd..6d77432e14ff971bffd4ca211dccb917768b2c8c 100644 (file)
@@ -363,9 +363,6 @@ extern void show_regs(struct pt_regs *);
  */
 extern void show_stack(struct task_struct *task, unsigned long *sp);
 
-void io_schedule(void);
-long io_schedule_timeout(long timeout);
-
 extern void cpu_init (void);
 extern void trap_init(void);
 extern void update_process_times(int user);
@@ -422,6 +419,13 @@ extern signed long schedule_timeout_uninterruptible(signed long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
 
+extern long io_schedule_timeout(long timeout);
+
+static inline void io_schedule(void)
+{
+       io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
+}
+
 struct nsproxy;
 struct user_namespace;
 
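[Editor's sketch, not part of the patch] io_schedule() is now a static inline wrapper around io_schedule_timeout(MAX_SCHEDULE_TIMEOUT), so caller-visible behaviour is unchanged. A hedged sketch of a typical open-coded wait; the helper name and the done flag are illustrative.

#include <linux/sched.h>

/* hypothetical wait helper: sleep accounted as I/O wait until *done is set */
static void example_wait_for_io(bool *done)
{
	while (!*done) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (*done)
			break;
		/* equivalent to io_schedule_timeout(MAX_SCHEDULE_TIMEOUT) */
		io_schedule();
	}
	__set_current_state(TASK_RUNNING);
}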
index 7e61a17030a4843bf0f80fda4dc2ae9f379409ef..694eecb2f1b5dffd8fe7f43c2979528ae2639042 100644 (file)
@@ -89,8 +89,11 @@ void                 rpc_free_iostats(struct rpc_iostats *);
 static inline struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt) { return NULL; }
 static inline void rpc_count_iostats(const struct rpc_task *task,
                                     struct rpc_iostats *stats) {}
-static inline void rpc_count_iostats_metrics(const struct rpc_task *,
-                                            struct rpc_iostats *) {}
+static inline void rpc_count_iostats_metrics(const struct rpc_task *task,
+                                            struct rpc_iostats *stats)
+{
+}
+
 static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {}
 static inline void rpc_free_iostats(struct rpc_iostats *stats) {}
 
index d3204115f15d21dd7ef3d879df2393884795b037..2d67b8998fd8b49d877d65b0b94a022be47d4e28 100644 (file)
@@ -26,6 +26,7 @@
  * @ioctl: Perform ioctl(2) on device file descriptor, supporting VFIO_DEVICE_*
  *         operations documented below
  * @mmap: Perform mmap(2) on a region of the device file descriptor
+ * @request: Request for the bus driver to release the device
  */
 struct vfio_device_ops {
        char    *name;
@@ -38,6 +39,7 @@ struct vfio_device_ops {
        long    (*ioctl)(void *device_data, unsigned int cmd,
                         unsigned long arg);
        int     (*mmap)(void *device_data, struct vm_area_struct *vma);
+       void    (*request)(void *device_data, unsigned int count);
 };
 
 extern int vfio_add_group_dev(struct device *dev,
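[Editor's sketch, not part of the patch] A hedged example of how a vfio bus driver might wire up the new ->request() callback added above; the driver name, message and count handling are illustrative assumptions.

#include <linux/printk.h>
#include <linux/vfio.h>

/* count increases with each unanswered request from the vfio core */
static void example_vfio_request(void *device_data, unsigned int count)
{
	pr_warn("example-vfio: host needs the device back, please release it (request %u)\n",
		count);
}

static const struct vfio_device_ops example_vfio_ops = {
	.name    = "example-vfio",
	/* .open/.release/.read/.write/.ioctl/.mmap omitted for brevity */
	.request = example_vfio_request,
};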
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
new file mode 100644 (file)
index 0000000..d3583d3
--- /dev/null
@@ -0,0 +1,897 @@
+#ifndef ISCSI_TARGET_CORE_H
+#define ISCSI_TARGET_CORE_H
+
+#include <linux/in.h>
+#include <linux/configfs.h>
+#include <net/sock.h>
+#include <net/tcp.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/iscsi_proto.h>
+#include <target/target_core_base.h>
+
+#define ISCSIT_VERSION                 "v4.1.0"
+#define ISCSI_MAX_DATASN_MISSING_COUNT 16
+#define ISCSI_TX_THREAD_TCP_TIMEOUT    2
+#define ISCSI_RX_THREAD_TCP_TIMEOUT    2
+#define SECONDS_FOR_ASYNC_LOGOUT       10
+#define SECONDS_FOR_ASYNC_TEXT         10
+#define SECONDS_FOR_LOGOUT_COMP                15
+#define WHITE_SPACE                    " \t\v\f\n\r"
+#define ISCSIT_MIN_TAGS                        16
+#define ISCSIT_EXTRA_TAGS              8
+#define ISCSIT_TCP_BACKLOG             256
+
+/* struct iscsi_node_attrib sanity values */
+#define NA_DATAOUT_TIMEOUT             3
+#define NA_DATAOUT_TIMEOUT_MAX         60
+#define NA_DATAOUT_TIMEOUT_MIX         2
+#define NA_DATAOUT_TIMEOUT_RETRIES     5
+#define NA_DATAOUT_TIMEOUT_RETRIES_MAX 15
+#define NA_DATAOUT_TIMEOUT_RETRIES_MIN 1
+#define NA_NOPIN_TIMEOUT               15
+#define NA_NOPIN_TIMEOUT_MAX           60
+#define NA_NOPIN_TIMEOUT_MIN           3
+#define NA_NOPIN_RESPONSE_TIMEOUT      30
+#define NA_NOPIN_RESPONSE_TIMEOUT_MAX  60
+#define NA_NOPIN_RESPONSE_TIMEOUT_MIN  3
+#define NA_RANDOM_DATAIN_PDU_OFFSETS   0
+#define NA_RANDOM_DATAIN_SEQ_OFFSETS   0
+#define NA_RANDOM_R2T_OFFSETS          0
+
+/* struct iscsi_tpg_attrib sanity values */
+#define TA_AUTHENTICATION              1
+#define TA_LOGIN_TIMEOUT               15
+#define TA_LOGIN_TIMEOUT_MAX           30
+#define TA_LOGIN_TIMEOUT_MIN           5
+#define TA_NETIF_TIMEOUT               2
+#define TA_NETIF_TIMEOUT_MAX           15
+#define TA_NETIF_TIMEOUT_MIN           2
+#define TA_GENERATE_NODE_ACLS          0
+#define TA_DEFAULT_CMDSN_DEPTH         64
+#define TA_DEFAULT_CMDSN_DEPTH_MAX     512
+#define TA_DEFAULT_CMDSN_DEPTH_MIN     1
+#define TA_CACHE_DYNAMIC_ACLS          0
+/* Enabled by default in demo mode (generic_node_acls=1) */
+#define TA_DEMO_MODE_WRITE_PROTECT     1
+/* Disabled by default in production mode w/ explicit ACLs */
+#define TA_PROD_MODE_WRITE_PROTECT     0
+#define TA_DEMO_MODE_DISCOVERY         1
+#define TA_DEFAULT_ERL                 0
+#define TA_CACHE_CORE_NPS              0
+/* T10 protection information disabled by default */
+#define TA_DEFAULT_T10_PI              0
+
+#define ISCSI_IOV_DATA_BUFFER          5
+
+enum iscsit_transport_type {
+       ISCSI_TCP                               = 0,
+       ISCSI_SCTP_TCP                          = 1,
+       ISCSI_SCTP_UDP                          = 2,
+       ISCSI_IWARP_TCP                         = 3,
+       ISCSI_IWARP_SCTP                        = 4,
+       ISCSI_INFINIBAND                        = 5,
+};
+
+/* RFC-3720 7.1.4  Standard Connection State Diagram for a Target */
+enum target_conn_state_table {
+       TARG_CONN_STATE_FREE                    = 0x1,
+       TARG_CONN_STATE_XPT_UP                  = 0x3,
+       TARG_CONN_STATE_IN_LOGIN                = 0x4,
+       TARG_CONN_STATE_LOGGED_IN               = 0x5,
+       TARG_CONN_STATE_IN_LOGOUT               = 0x6,
+       TARG_CONN_STATE_LOGOUT_REQUESTED        = 0x7,
+       TARG_CONN_STATE_CLEANUP_WAIT            = 0x8,
+};
+
+/* RFC-3720 7.3.2  Session State Diagram for a Target */
+enum target_sess_state_table {
+       TARG_SESS_STATE_FREE                    = 0x1,
+       TARG_SESS_STATE_ACTIVE                  = 0x2,
+       TARG_SESS_STATE_LOGGED_IN               = 0x3,
+       TARG_SESS_STATE_FAILED                  = 0x4,
+       TARG_SESS_STATE_IN_CONTINUE             = 0x5,
+};
+
+/* struct iscsi_data_count->type */
+enum data_count_type {
+       ISCSI_RX_DATA   = 1,
+       ISCSI_TX_DATA   = 2,
+};
+
+/* struct iscsi_datain_req->dr_complete */
+enum datain_req_comp_table {
+       DATAIN_COMPLETE_NORMAL                  = 1,
+       DATAIN_COMPLETE_WITHIN_COMMAND_RECOVERY = 2,
+       DATAIN_COMPLETE_CONNECTION_RECOVERY     = 3,
+};
+
+/* struct iscsi_datain_req->recovery */
+enum datain_req_rec_table {
+       DATAIN_WITHIN_COMMAND_RECOVERY          = 1,
+       DATAIN_CONNECTION_RECOVERY              = 2,
+};
+
+/* struct iscsi_portal_group->state */
+enum tpg_state_table {
+       TPG_STATE_FREE                          = 0,
+       TPG_STATE_ACTIVE                        = 1,
+       TPG_STATE_INACTIVE                      = 2,
+       TPG_STATE_COLD_RESET                    = 3,
+};
+
+/* struct iscsi_tiqn->tiqn_state */
+enum tiqn_state_table {
+       TIQN_STATE_ACTIVE                       = 1,
+       TIQN_STATE_SHUTDOWN                     = 2,
+};
+
+/* struct iscsi_cmd->cmd_flags */
+enum cmd_flags_table {
+       ICF_GOT_LAST_DATAOUT                    = 0x00000001,
+       ICF_GOT_DATACK_SNACK                    = 0x00000002,
+       ICF_NON_IMMEDIATE_UNSOLICITED_DATA      = 0x00000004,
+       ICF_SENT_LAST_R2T                       = 0x00000008,
+       ICF_WITHIN_COMMAND_RECOVERY             = 0x00000010,
+       ICF_CONTIG_MEMORY                       = 0x00000020,
+       ICF_ATTACHED_TO_RQUEUE                  = 0x00000040,
+       ICF_OOO_CMDSN                           = 0x00000080,
+       ICF_SENDTARGETS_ALL                     = 0x00000100,
+       ICF_SENDTARGETS_SINGLE                  = 0x00000200,
+};
+
+/* struct iscsi_cmd->i_state */
+enum cmd_i_state_table {
+       ISTATE_NO_STATE                 = 0,
+       ISTATE_NEW_CMD                  = 1,
+       ISTATE_DEFERRED_CMD             = 2,
+       ISTATE_UNSOLICITED_DATA         = 3,
+       ISTATE_RECEIVE_DATAOUT          = 4,
+       ISTATE_RECEIVE_DATAOUT_RECOVERY = 5,
+       ISTATE_RECEIVED_LAST_DATAOUT    = 6,
+       ISTATE_WITHIN_DATAOUT_RECOVERY  = 7,
+       ISTATE_IN_CONNECTION_RECOVERY   = 8,
+       ISTATE_RECEIVED_TASKMGT         = 9,
+       ISTATE_SEND_ASYNCMSG            = 10,
+       ISTATE_SENT_ASYNCMSG            = 11,
+       ISTATE_SEND_DATAIN              = 12,
+       ISTATE_SEND_LAST_DATAIN         = 13,
+       ISTATE_SENT_LAST_DATAIN         = 14,
+       ISTATE_SEND_LOGOUTRSP           = 15,
+       ISTATE_SENT_LOGOUTRSP           = 16,
+       ISTATE_SEND_NOPIN               = 17,
+       ISTATE_SENT_NOPIN               = 18,
+       ISTATE_SEND_REJECT              = 19,
+       ISTATE_SENT_REJECT              = 20,
+       ISTATE_SEND_R2T                 = 21,
+       ISTATE_SENT_R2T                 = 22,
+       ISTATE_SEND_R2T_RECOVERY        = 23,
+       ISTATE_SENT_R2T_RECOVERY        = 24,
+       ISTATE_SEND_LAST_R2T            = 25,
+       ISTATE_SENT_LAST_R2T            = 26,
+       ISTATE_SEND_LAST_R2T_RECOVERY   = 27,
+       ISTATE_SENT_LAST_R2T_RECOVERY   = 28,
+       ISTATE_SEND_STATUS              = 29,
+       ISTATE_SEND_STATUS_BROKEN_PC    = 30,
+       ISTATE_SENT_STATUS              = 31,
+       ISTATE_SEND_STATUS_RECOVERY     = 32,
+       ISTATE_SENT_STATUS_RECOVERY     = 33,
+       ISTATE_SEND_TASKMGTRSP          = 34,
+       ISTATE_SENT_TASKMGTRSP          = 35,
+       ISTATE_SEND_TEXTRSP             = 36,
+       ISTATE_SENT_TEXTRSP             = 37,
+       ISTATE_SEND_NOPIN_WANT_RESPONSE = 38,
+       ISTATE_SENT_NOPIN_WANT_RESPONSE = 39,
+       ISTATE_SEND_NOPIN_NO_RESPONSE   = 40,
+       ISTATE_REMOVE                   = 41,
+       ISTATE_FREE                     = 42,
+};
+
+/* Used for iscsi_recover_cmdsn() return values */
+enum recover_cmdsn_ret_table {
+       CMDSN_ERROR_CANNOT_RECOVER      = -1,
+       CMDSN_NORMAL_OPERATION          = 0,
+       CMDSN_LOWER_THAN_EXP            = 1,
+       CMDSN_HIGHER_THAN_EXP           = 2,
+       CMDSN_MAXCMDSN_OVERRUN          = 3,
+};
+
+/* Used for iscsi_handle_immediate_data() return values */
+enum immedate_data_ret_table {
+       IMMEDIATE_DATA_CANNOT_RECOVER   = -1,
+       IMMEDIATE_DATA_NORMAL_OPERATION = 0,
+       IMMEDIATE_DATA_ERL1_CRC_FAILURE = 1,
+};
+
+/* Used for iscsi_decide_dataout_action() return values */
+enum dataout_action_ret_table {
+       DATAOUT_CANNOT_RECOVER          = -1,
+       DATAOUT_NORMAL                  = 0,
+       DATAOUT_SEND_R2T                = 1,
+       DATAOUT_SEND_TO_TRANSPORT       = 2,
+       DATAOUT_WITHIN_COMMAND_RECOVERY = 3,
+};
+
+/* Used for struct iscsi_node_auth->naf_flags */
+enum naf_flags_table {
+       NAF_USERID_SET                  = 0x01,
+       NAF_PASSWORD_SET                = 0x02,
+       NAF_USERID_IN_SET               = 0x04,
+       NAF_PASSWORD_IN_SET             = 0x08,
+};
+
+/* Used by various struct timer_list to manage iSCSI specific state */
+enum iscsi_timer_flags_table {
+       ISCSI_TF_RUNNING                = 0x01,
+       ISCSI_TF_STOP                   = 0x02,
+       ISCSI_TF_EXPIRED                = 0x04,
+};
+
+/* Used for struct iscsi_np->np_flags */
+enum np_flags_table {
+       NPF_IP_NETWORK          = 0x00,
+};
+
+/* Used for struct iscsi_np->np_thread_state */
+enum np_thread_state_table {
+       ISCSI_NP_THREAD_ACTIVE          = 1,
+       ISCSI_NP_THREAD_INACTIVE        = 2,
+       ISCSI_NP_THREAD_RESET           = 3,
+       ISCSI_NP_THREAD_SHUTDOWN        = 4,
+       ISCSI_NP_THREAD_EXIT            = 5,
+};
+
+struct iscsi_conn_ops {
+       u8      HeaderDigest;                   /* [0,1] == [None,CRC32C] */
+       u8      DataDigest;                     /* [0,1] == [None,CRC32C] */
+       u32     MaxRecvDataSegmentLength;       /* [512..2**24-1] */
+       u32     MaxXmitDataSegmentLength;       /* [512..2**24-1] */
+       u8      OFMarker;                       /* [0,1] == [No,Yes] */
+       u8      IFMarker;                       /* [0,1] == [No,Yes] */
+       u32     OFMarkInt;                      /* [1..65535] */
+       u32     IFMarkInt;                      /* [1..65535] */
+       /*
+        * iSER specific connection parameters
+        */
+       u32     InitiatorRecvDataSegmentLength; /* [512..2**24-1] */
+       u32     TargetRecvDataSegmentLength;    /* [512..2**24-1] */
+};
+
+struct iscsi_sess_ops {
+       char    InitiatorName[224];
+       char    InitiatorAlias[256];
+       char    TargetName[224];
+       char    TargetAlias[256];
+       char    TargetAddress[256];
+       u16     TargetPortalGroupTag;           /* [0..65535] */
+       u16     MaxConnections;                 /* [1..65535] */
+       u8      InitialR2T;                     /* [0,1] == [No,Yes] */
+       u8      ImmediateData;                  /* [0,1] == [No,Yes] */
+       u32     MaxBurstLength;                 /* [512..2**24-1] */
+       u32     FirstBurstLength;               /* [512..2**24-1] */
+       u16     DefaultTime2Wait;               /* [0..3600] */
+       u16     DefaultTime2Retain;             /* [0..3600] */
+       u16     MaxOutstandingR2T;              /* [1..65535] */
+       u8      DataPDUInOrder;                 /* [0,1] == [No,Yes] */
+       u8      DataSequenceInOrder;            /* [0,1] == [No,Yes] */
+       u8      ErrorRecoveryLevel;             /* [0..2] */
+       u8      SessionType;                    /* [0,1] == [Normal,Discovery]*/
+       /*
+        * iSER specific session parameters
+        */
+       u8      RDMAExtensions;                 /* [0,1] == [No,Yes] */
+};
+
+struct iscsi_queue_req {
+       int                     state;
+       struct iscsi_cmd        *cmd;
+       struct list_head        qr_list;
+};
+
+struct iscsi_data_count {
+       int                     data_length;
+       int                     sync_and_steering;
+       enum data_count_type    type;
+       u32                     iov_count;
+       u32                     ss_iov_count;
+       u32                     ss_marker_count;
+       struct kvec             *iov;
+};
+
+struct iscsi_param_list {
+       bool                    iser;
+       struct list_head        param_list;
+       struct list_head        extra_response_list;
+};
+
+struct iscsi_datain_req {
+       enum datain_req_comp_table dr_complete;
+       int                     generate_recovery_values;
+       enum datain_req_rec_table recovery;
+       u32                     begrun;
+       u32                     runlength;
+       u32                     data_length;
+       u32                     data_offset;
+       u32                     data_sn;
+       u32                     next_burst_len;
+       u32                     read_data_done;
+       u32                     seq_send_order;
+       struct list_head        cmd_datain_node;
+} ____cacheline_aligned;
+
+struct iscsi_ooo_cmdsn {
+       u16                     cid;
+       u32                     batch_count;
+       u32                     cmdsn;
+       u32                     exp_cmdsn;
+       struct iscsi_cmd        *cmd;
+       struct list_head        ooo_list;
+} ____cacheline_aligned;
+
+struct iscsi_datain {
+       u8                      flags;
+       u32                     data_sn;
+       u32                     length;
+       u32                     offset;
+} ____cacheline_aligned;
+
+struct iscsi_r2t {
+       int                     seq_complete;
+       int                     recovery_r2t;
+       int                     sent_r2t;
+       u32                     r2t_sn;
+       u32                     offset;
+       u32                     targ_xfer_tag;
+       u32                     xfer_len;
+       struct list_head        r2t_list;
+} ____cacheline_aligned;
+
+struct iscsi_cmd {
+       enum iscsi_timer_flags_table dataout_timer_flags;
+       /* DataOUT timeout retries */
+       u8                      dataout_timeout_retries;
+       /* Within command recovery count */
+       u8                      error_recovery_count;
+       /* iSCSI dependent state for out of order CmdSNs */
+       enum cmd_i_state_table  deferred_i_state;
+       /* iSCSI dependent state */
+       enum cmd_i_state_table  i_state;
+       /* Command is an immediate command (ISCSI_OP_IMMEDIATE set) */
+       u8                      immediate_cmd;
+       /* Immediate data present */
+       u8                      immediate_data;
+       /* iSCSI Opcode */
+       u8                      iscsi_opcode;
+       /* iSCSI Response Code */
+       u8                      iscsi_response;
+       /* Logout reason when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+       u8                      logout_reason;
+       /* Logout response code when iscsi_opcode == ISCSI_INIT_LOGOUT_CMND */
+       u8                      logout_response;
+       /* MaxCmdSN has been incremented */
+       u8                      maxcmdsn_inc;
+       /* Immediate Unsolicited Dataout */
+       u8                      unsolicited_data;
+       /* Reject reason code */
+       u8                      reject_reason;
+       /* CID contained in logout PDU when opcode == ISCSI_INIT_LOGOUT_CMND */
+       u16                     logout_cid;
+       /* Command flags */
+       enum cmd_flags_table    cmd_flags;
+       /* Initiator Task Tag assigned from Initiator */
+       itt_t                   init_task_tag;
+       /* Target Transfer Tag assigned from Target */
+       u32                     targ_xfer_tag;
+       /* CmdSN assigned from Initiator */
+       u32                     cmd_sn;
+       /* ExpStatSN assigned from Initiator */
+       u32                     exp_stat_sn;
+       /* StatSN assigned to this ITT */
+       u32                     stat_sn;
+       /* DataSN Counter */
+       u32                     data_sn;
+       /* R2TSN Counter */
+       u32                     r2t_sn;
+       /* Last DataSN acknowledged via DataAck SNACK */
+       u32                     acked_data_sn;
+       /* Used for echoing NOPOUT ping data */
+       u32                     buf_ptr_size;
+       /* Used to store DataDigest */
+       u32                     data_crc;
+       /* Counter for MaxOutstandingR2T */
+       u32                     outstanding_r2ts;
+       /* Next R2T Offset when DataSequenceInOrder=Yes */
+       u32                     r2t_offset;
+       /* Iovec current and orig count for iscsi_cmd->iov_data */
+       u32                     iov_data_count;
+       u32                     orig_iov_data_count;
+       /* Number of miscellaneous iovecs used for IP stack calls */
+       u32                     iov_misc_count;
+       /* Number of struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+       u32                     pdu_count;
+       /* Next struct iscsi_pdu to send in struct iscsi_cmd->pdu_list */
+       u32                     pdu_send_order;
+       /* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
+       u32                     pdu_start;
+       /* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
+       u32                     seq_send_order;
+       /* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
+       u32                     seq_count;
+       /* Current struct iscsi_seq in struct iscsi_cmd->seq_list */
+       u32                     seq_no;
+       /* Lowest offset in current DataOUT sequence */
+       u32                     seq_start_offset;
+       /* Highest offset in current DataOUT sequence */
+       u32                     seq_end_offset;
+       /* Total size in bytes received so far of READ data */
+       u32                     read_data_done;
+       /* Total size in bytes received so far of WRITE data */
+       u32                     write_data_done;
+       /* Counter for FirstBurstLength key */
+       u32                     first_burst_len;
+       /* Counter for MaxBurstLength key */
+       u32                     next_burst_len;
+       /* Transfer size used for IP stack calls */
+       u32                     tx_size;
+       /* Buffer used for various purposes */
+       void                    *buf_ptr;
+       /* Used by SendTargets=[iqn.,eui.] discovery */
+       void                    *text_in_ptr;
+       /* See include/linux/dma-mapping.h */
+       enum dma_data_direction data_direction;
+       /* iSCSI PDU Header + CRC */
+       unsigned char           pdu[ISCSI_HDR_LEN + ISCSI_CRC_LEN];
+       /* Number of times struct iscsi_cmd is present in immediate queue */
+       atomic_t                immed_queue_count;
+       atomic_t                response_queue_count;
+       spinlock_t              datain_lock;
+       spinlock_t              dataout_timeout_lock;
+       /* spinlock for protecting struct iscsi_cmd->i_state */
+       spinlock_t              istate_lock;
+       /* spinlock for adding within command recovery entries */
+       spinlock_t              error_lock;
+       /* spinlock for adding R2Ts */
+       spinlock_t              r2t_lock;
+       /* DataIN List */
+       struct list_head        datain_list;
+       /* R2T List */
+       struct list_head        cmd_r2t_list;
+       /* Timer for DataOUT */
+       struct timer_list       dataout_timer;
+       /* Iovecs for SCSI data payload RX/TX w/ kernel level sockets */
+       struct kvec             *iov_data;
+       /* Iovecs for miscellaneous purposes */
+#define ISCSI_MISC_IOVECS                      5
+       struct kvec             iov_misc[ISCSI_MISC_IOVECS];
+       /* Array of struct iscsi_pdu used for DataPDUInOrder=No */
+       struct iscsi_pdu        *pdu_list;
+       /* Current struct iscsi_pdu used for DataPDUInOrder=No */
+       struct iscsi_pdu        *pdu_ptr;
+       /* Array of struct iscsi_seq used for DataSequenceInOrder=No */
+       struct iscsi_seq        *seq_list;
+       /* Current struct iscsi_seq used for DataSequenceInOrder=No */
+       struct iscsi_seq        *seq_ptr;
+       /* TMR Request when iscsi_opcode == ISCSI_OP_SCSI_TMFUNC */
+       struct iscsi_tmr_req    *tmr_req;
+       /* Connection this command is allegiant to */
+       struct iscsi_conn       *conn;
+       /* Pointer to connection recovery entry */
+       struct iscsi_conn_recovery *cr;
+       /* Session the command is part of,  used for connection recovery */
+       struct iscsi_session    *sess;
+       /* list_head for connection list */
+       struct list_head        i_conn_node;
+       /* The TCM I/O descriptor that is accessed via container_of() */
+       struct se_cmd           se_cmd;
+       /* Sense buffer that will be mapped into outgoing status */
+#define ISCSI_SENSE_BUFFER_LEN          (TRANSPORT_SENSE_BUFFER + 2)
+       unsigned char           sense_buffer[ISCSI_SENSE_BUFFER_LEN];
+
+       u32                     padding;
+       u8                      pad_bytes[4];
+
+       struct scatterlist      *first_data_sg;
+       u32                     first_data_sg_off;
+       u32                     kmapped_nents;
+       sense_reason_t          sense_reason;
+}  ____cacheline_aligned;
+
+struct iscsi_tmr_req {
+       bool                    task_reassign:1;
+       u32                     exp_data_sn;
+       struct iscsi_cmd        *ref_cmd;
+       struct iscsi_conn_recovery *conn_recovery;
+       struct se_tmr_req       *se_tmr_req;
+};
+
+struct iscsi_conn {
+       wait_queue_head_t       queues_wq;
+       /* Authentication Successful for this connection */
+       u8                      auth_complete;
+       /* State connection is currently in */
+       u8                      conn_state;
+       u8                      conn_logout_reason;
+       u8                      network_transport;
+       enum iscsi_timer_flags_table nopin_timer_flags;
+       enum iscsi_timer_flags_table nopin_response_timer_flags;
+       /* Used to know what thread encountered a transport failure */
+       u8                      which_thread;
+       /* connection id assigned by the Initiator */
+       u16                     cid;
+       /* Remote TCP Port */
+       u16                     login_port;
+       u16                     local_port;
+       int                     net_size;
+       int                     login_family;
+       u32                     auth_id;
+       u32                     conn_flags;
+       /* Used for iscsi_tx_login_rsp() */
+       itt_t                   login_itt;
+       u32                     exp_statsn;
+       /* Per connection status sequence number */
+       u32                     stat_sn;
+       /* IFMarkInt's Current Value */
+       u32                     if_marker;
+       /* OFMarkInt's Current Value */
+       u32                     of_marker;
+       /* Used for calculating OFMarker offset to next PDU */
+       u32                     of_marker_offset;
+#define IPV6_ADDRESS_SPACE                             48
+       unsigned char           login_ip[IPV6_ADDRESS_SPACE];
+       unsigned char           local_ip[IPV6_ADDRESS_SPACE];
+       int                     conn_usage_count;
+       int                     conn_waiting_on_uc;
+       atomic_t                check_immediate_queue;
+       atomic_t                conn_logout_remove;
+       atomic_t                connection_exit;
+       atomic_t                connection_recovery;
+       atomic_t                connection_reinstatement;
+       atomic_t                connection_wait_rcfr;
+       atomic_t                sleep_on_conn_wait_comp;
+       atomic_t                transport_failed;
+       struct completion       conn_post_wait_comp;
+       struct completion       conn_wait_comp;
+       struct completion       conn_wait_rcfr_comp;
+       struct completion       conn_waiting_on_uc_comp;
+       struct completion       conn_logout_comp;
+       struct completion       tx_half_close_comp;
+       struct completion       rx_half_close_comp;
+       /* socket used by this connection */
+       struct socket           *sock;
+       void                    (*orig_data_ready)(struct sock *);
+       void                    (*orig_state_change)(struct sock *);
+#define LOGIN_FLAGS_READ_ACTIVE                1
+#define LOGIN_FLAGS_CLOSED             2
+#define LOGIN_FLAGS_READY              4
+       unsigned long           login_flags;
+       struct delayed_work     login_work;
+       struct delayed_work     login_cleanup_work;
+       struct iscsi_login      *login;
+       struct timer_list       nopin_timer;
+       struct timer_list       nopin_response_timer;
+       struct timer_list       transport_timer;
+       struct task_struct      *login_kworker;
+       /* Spinlock used for add/deleting cmd's from conn_cmd_list */
+       spinlock_t              cmd_lock;
+       spinlock_t              conn_usage_lock;
+       spinlock_t              immed_queue_lock;
+       spinlock_t              nopin_timer_lock;
+       spinlock_t              response_queue_lock;
+       spinlock_t              state_lock;
+       /* libcrypto RX and TX contexts for crc32c */
+       struct hash_desc        conn_rx_hash;
+       struct hash_desc        conn_tx_hash;
+       /* Used for scheduling TX and RX connection kthreads */
+       cpumask_var_t           conn_cpumask;
+       unsigned int            conn_rx_reset_cpumask:1;
+       unsigned int            conn_tx_reset_cpumask:1;
+       /* list_head of struct iscsi_cmd for this connection */
+       struct list_head        conn_cmd_list;
+       struct list_head        immed_queue_list;
+       struct list_head        response_queue_list;
+       struct iscsi_conn_ops   *conn_ops;
+       struct iscsi_login      *conn_login;
+       struct iscsit_transport *conn_transport;
+       struct iscsi_param_list *param_list;
+       /* Used for per connection auth state machine */
+       void                    *auth_protocol;
+       void                    *context;
+       struct iscsi_login_thread_s *login_thread;
+       struct iscsi_portal_group *tpg;
+       struct iscsi_tpg_np     *tpg_np;
+       /* Pointer to parent session */
+       struct iscsi_session    *sess;
+       /* Pointer to thread_set in use for this conn's threads */
+       struct iscsi_thread_set *thread_set;
+       /* list_head for session connection list */
+       struct list_head        conn_list;
+} ____cacheline_aligned;
+
+struct iscsi_conn_recovery {
+       u16                     cid;
+       u32                     cmd_count;
+       u32                     maxrecvdatasegmentlength;
+       u32                     maxxmitdatasegmentlength;
+       int                     ready_for_reallegiance;
+       struct list_head        conn_recovery_cmd_list;
+       spinlock_t              conn_recovery_cmd_lock;
+       struct timer_list       time2retain_timer;
+       struct iscsi_session    *sess;
+       struct list_head        cr_list;
+}  ____cacheline_aligned;
+
+struct iscsi_session {
+       u8                      initiator_vendor;
+       u8                      isid[6];
+       enum iscsi_timer_flags_table time2retain_timer_flags;
+       u8                      version_active;
+       u16                     cid_called;
+       u16                     conn_recovery_count;
+       u16                     tsih;
+       /* state session is currently in */
+       u32                     session_state;
+       /* session wide counter: initiator assigned task tag */
+       itt_t                   init_task_tag;
+       /* session wide counter: target assigned task tag */
+       u32                     targ_xfer_tag;
+       u32                     cmdsn_window;
+
+       /* protects cmdsn values */
+       struct mutex            cmdsn_mutex;
+       /* session wide counter: expected command sequence number */
+       u32                     exp_cmd_sn;
+       /* session wide counter: maximum allowed command sequence number */
+       u32                     max_cmd_sn;
+       struct list_head        sess_ooo_cmdsn_list;
+
+       /* LIO specific session ID */
+       u32                     sid;
+       char                    auth_type[8];
+       /* unique within the target */
+       int                     session_index;
+       /* Used for session reference counting */
+       int                     session_usage_count;
+       int                     session_waiting_on_uc;
+       atomic_long_t           cmd_pdus;
+       atomic_long_t           rsp_pdus;
+       atomic_long_t           tx_data_octets;
+       atomic_long_t           rx_data_octets;
+       atomic_long_t           conn_digest_errors;
+       atomic_long_t           conn_timeout_errors;
+       u64                     creation_time;
+       /* Number of active connections */
+       atomic_t                nconn;
+       atomic_t                session_continuation;
+       atomic_t                session_fall_back_to_erl0;
+       atomic_t                session_logout;
+       atomic_t                session_reinstatement;
+       atomic_t                session_stop_active;
+       atomic_t                sleep_on_sess_wait_comp;
+       /* connection list */
+       struct list_head        sess_conn_list;
+       struct list_head        cr_active_list;
+       struct list_head        cr_inactive_list;
+       spinlock_t              conn_lock;
+       spinlock_t              cr_a_lock;
+       spinlock_t              cr_i_lock;
+       spinlock_t              session_usage_lock;
+       spinlock_t              ttt_lock;
+       struct completion       async_msg_comp;
+       struct completion       reinstatement_comp;
+       struct completion       session_wait_comp;
+       struct completion       session_waiting_on_uc_comp;
+       struct timer_list       time2retain_timer;
+       struct iscsi_sess_ops   *sess_ops;
+       struct se_session       *se_sess;
+       struct iscsi_portal_group *tpg;
+} ____cacheline_aligned;
+
+struct iscsi_login {
+       u8 auth_complete;
+       u8 checked_for_existing;
+       u8 current_stage;
+       u8 leading_connection;
+       u8 first_request;
+       u8 version_min;
+       u8 version_max;
+       u8 login_complete;
+       u8 login_failed;
+       bool zero_tsih;
+       char isid[6];
+       u32 cmd_sn;
+       itt_t init_task_tag;
+       u32 initial_exp_statsn;
+       u32 rsp_length;
+       u16 cid;
+       u16 tsih;
+       char req[ISCSI_HDR_LEN];
+       char rsp[ISCSI_HDR_LEN];
+       char *req_buf;
+       char *rsp_buf;
+       struct iscsi_conn *conn;
+       struct iscsi_np *np;
+} ____cacheline_aligned;
+
+struct iscsi_node_attrib {
+       u32                     dataout_timeout;
+       u32                     dataout_timeout_retries;
+       u32                     default_erl;
+       u32                     nopin_timeout;
+       u32                     nopin_response_timeout;
+       u32                     random_datain_pdu_offsets;
+       u32                     random_datain_seq_offsets;
+       u32                     random_r2t_offsets;
+       u32                     tmr_cold_reset;
+       u32                     tmr_warm_reset;
+       struct iscsi_node_acl *nacl;
+};
+
+struct se_dev_entry_s;
+
+struct iscsi_node_auth {
+       enum naf_flags_table    naf_flags;
+       int                     authenticate_target;
+       /* Used for iscsit_global->discovery_auth,
+        * set to zero (auth disabled) by default */
+       int                     enforce_discovery_auth;
+#define MAX_USER_LEN                           256
+#define MAX_PASS_LEN                           256
+       char                    userid[MAX_USER_LEN];
+       char                    password[MAX_PASS_LEN];
+       char                    userid_mutual[MAX_USER_LEN];
+       char                    password_mutual[MAX_PASS_LEN];
+};
+
+#include "iscsi_target_stat.h"
+
+struct iscsi_node_stat_grps {
+       struct config_group     iscsi_sess_stats_group;
+       struct config_group     iscsi_conn_stats_group;
+};
+
+struct iscsi_node_acl {
+       struct iscsi_node_attrib node_attrib;
+       struct iscsi_node_auth  node_auth;
+       struct iscsi_node_stat_grps node_stat_grps;
+       struct se_node_acl      se_node_acl;
+};
+
+struct iscsi_tpg_attrib {
+       u32                     authentication;
+       u32                     login_timeout;
+       u32                     netif_timeout;
+       u32                     generate_node_acls;
+       u32                     cache_dynamic_acls;
+       u32                     default_cmdsn_depth;
+       u32                     demo_mode_write_protect;
+       u32                     prod_mode_write_protect;
+       u32                     demo_mode_discovery;
+       u32                     default_erl;
+       u8                      t10_pi;
+       struct iscsi_portal_group *tpg;
+};
+
+struct iscsi_np {
+       int                     np_network_transport;
+       int                     np_ip_proto;
+       int                     np_sock_type;
+       enum np_thread_state_table np_thread_state;
+       bool                    enabled;
+       enum iscsi_timer_flags_table np_login_timer_flags;
+       u32                     np_exports;
+       enum np_flags_table     np_flags;
+       unsigned char           np_ip[IPV6_ADDRESS_SPACE];
+       u16                     np_port;
+       spinlock_t              np_thread_lock;
+       struct completion       np_restart_comp;
+       struct socket           *np_socket;
+       struct __kernel_sockaddr_storage np_sockaddr;
+       struct task_struct      *np_thread;
+       struct timer_list       np_login_timer;
+       void                    *np_context;
+       struct iscsit_transport *np_transport;
+       struct list_head        np_list;
+} ____cacheline_aligned;
+
+struct iscsi_tpg_np {
+       struct iscsi_np         *tpg_np;
+       struct iscsi_portal_group *tpg;
+       struct iscsi_tpg_np     *tpg_np_parent;
+       struct list_head        tpg_np_list;
+       struct list_head        tpg_np_child_list;
+       struct list_head        tpg_np_parent_list;
+       struct se_tpg_np        se_tpg_np;
+       spinlock_t              tpg_np_parent_lock;
+       struct completion       tpg_np_comp;
+       struct kref             tpg_np_kref;
+};
+
+struct iscsi_portal_group {
+       unsigned char           tpg_chap_id;
+       /* TPG State */
+       enum tpg_state_table    tpg_state;
+       /* Target Portal Group Tag */
+       u16                     tpgt;
+       /* Id assigned to target sessions */
+       u16                     ntsih;
+       /* Number of active sessions */
+       u32                     nsessions;
+       /* Number of Network Portals available for this TPG */
+       u32                     num_tpg_nps;
+       /* Per TPG LIO specific session ID. */
+       u32                     sid;
+       /* Spinlock for adding/removing Network Portals */
+       spinlock_t              tpg_np_lock;
+       spinlock_t              tpg_state_lock;
+       struct se_portal_group tpg_se_tpg;
+       struct mutex            tpg_access_lock;
+       struct semaphore        np_login_sem;
+       struct iscsi_tpg_attrib tpg_attrib;
+       struct iscsi_node_auth  tpg_demo_auth;
+       /* Pointer to default list of iSCSI parameters for TPG */
+       struct iscsi_param_list *param_list;
+       struct iscsi_tiqn       *tpg_tiqn;
+       struct list_head        tpg_gnp_list;
+       struct list_head        tpg_list;
+} ____cacheline_aligned;
+
+struct iscsi_wwn_stat_grps {
+       struct config_group     iscsi_stat_group;
+       struct config_group     iscsi_instance_group;
+       struct config_group     iscsi_sess_err_group;
+       struct config_group     iscsi_tgt_attr_group;
+       struct config_group     iscsi_login_stats_group;
+       struct config_group     iscsi_logout_stats_group;
+};
+
+struct iscsi_tiqn {
+#define ISCSI_IQN_LEN                          224
+       unsigned char           tiqn[ISCSI_IQN_LEN];
+       enum tiqn_state_table   tiqn_state;
+       int                     tiqn_access_count;
+       u32                     tiqn_active_tpgs;
+       u32                     tiqn_ntpgs;
+       u32                     tiqn_num_tpg_nps;
+       u32                     tiqn_nsessions;
+       struct list_head        tiqn_list;
+       struct list_head        tiqn_tpg_list;
+       spinlock_t              tiqn_state_lock;
+       spinlock_t              tiqn_tpg_lock;
+       struct se_wwn           tiqn_wwn;
+       struct iscsi_wwn_stat_grps tiqn_stat_grps;
+       int                     tiqn_index;
+       struct iscsi_sess_err_stats  sess_err_stats;
+       struct iscsi_login_stats     login_stats;
+       struct iscsi_logout_stats    logout_stats;
+} ____cacheline_aligned;
+
+struct iscsit_global {
+       /* In core shutdown */
+       u32                     in_shutdown;
+       u32                     active_ts;
+       /* Unique identifier used for the authentication daemon */
+       u32                     auth_id;
+       u32                     inactive_ts;
+       /* Thread Set bitmap count */
+       int                     ts_bitmap_count;
+       /* Thread Set bitmap pointer */
+       unsigned long           *ts_bitmap;
+       /* Used for iSCSI discovery session authentication */
+       struct iscsi_node_acl   discovery_acl;
+       struct iscsi_portal_group       *discovery_tpg;
+};
+
+static inline u32 session_get_next_ttt(struct iscsi_session *session)
+{
+       u32 ttt;
+
+       spin_lock_bh(&session->ttt_lock);
+       ttt = session->targ_xfer_tag++;
+       if (ttt == 0xFFFFFFFF)
+               ttt = session->targ_xfer_tag++;
+       spin_unlock_bh(&session->ttt_lock);
+
+       return ttt;
+}
+
+extern struct iscsi_cmd *iscsit_find_cmd_from_itt(struct iscsi_conn *, itt_t);
+#endif /* ISCSI_TARGET_CORE_H */
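[Editor's sketch, not part of the patch] The session_get_next_ttt() helper above skips 0xFFFFFFFF because that is the reserved target transfer tag on the wire (RFC 3720). A hedged usage sketch; the caller name is illustrative.

/* hypothetical caller assigning a tag to a new R2T */
static void example_assign_r2t_ttt(struct iscsi_session *sess,
				   struct iscsi_r2t *r2t)
{
	r2t->targ_xfer_tag = session_get_next_ttt(sess);
}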
diff --git a/include/target/iscsi/iscsi_target_stat.h b/include/target/iscsi/iscsi_target_stat.h
new file mode 100644 (file)
index 0000000..3ff76b4
--- /dev/null
@@ -0,0 +1,64 @@
+#ifndef ISCSI_TARGET_STAT_H
+#define ISCSI_TARGET_STAT_H
+
+/*
+ * For struct iscsi_tiqn->tiqn_wwn default groups
+ */
+extern struct config_item_type iscsi_stat_instance_cit;
+extern struct config_item_type iscsi_stat_sess_err_cit;
+extern struct config_item_type iscsi_stat_tgt_attr_cit;
+extern struct config_item_type iscsi_stat_login_cit;
+extern struct config_item_type iscsi_stat_logout_cit;
+
+/*
+ * For struct iscsi_session->se_sess default groups
+ */
+extern struct config_item_type iscsi_stat_sess_cit;
+
+/* iSCSI session error types */
+#define ISCSI_SESS_ERR_UNKNOWN         0
+#define ISCSI_SESS_ERR_DIGEST          1
+#define ISCSI_SESS_ERR_CXN_TIMEOUT     2
+#define ISCSI_SESS_ERR_PDU_FORMAT      3
+
+/* iSCSI session error stats */
+struct iscsi_sess_err_stats {
+       spinlock_t      lock;
+       u32             digest_errors;
+       u32             cxn_timeout_errors;
+       u32             pdu_format_errors;
+       u32             last_sess_failure_type;
+       char            last_sess_fail_rem_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI login failure types (sub oids) */
+#define ISCSI_LOGIN_FAIL_OTHER         2
+#define ISCSI_LOGIN_FAIL_REDIRECT      3
+#define ISCSI_LOGIN_FAIL_AUTHORIZE     4
+#define ISCSI_LOGIN_FAIL_AUTHENTICATE  5
+#define ISCSI_LOGIN_FAIL_NEGOTIATE     6
+
+/* iSCSI login stats */
+struct iscsi_login_stats {
+       spinlock_t      lock;
+       u32             accepts;
+       u32             other_fails;
+       u32             redirects;
+       u32             authorize_fails;
+       u32             authenticate_fails;
+       u32             negotiate_fails;        /* used for notifications */
+       u64             last_fail_time;         /* time stamp (jiffies) */
+       u32             last_fail_type;
+       int             last_intr_fail_ip_family;
+       unsigned char   last_intr_fail_ip_addr[IPV6_ADDRESS_SPACE];
+       char            last_intr_fail_name[224];
+} ____cacheline_aligned;
+
+/* iSCSI logout stats */
+struct iscsi_logout_stats {
+       spinlock_t      lock;
+       u32             normal_logouts;
+       u32             abnormal_logouts;
+} ____cacheline_aligned;
+
+#endif   /*** ISCSI_TARGET_STAT_H ***/
index daef9daa500c11f0ff7d92151fafbf9080a80e02..e6bb166f12c212aac238d0d951594cce65851145 100644 (file)
@@ -1,6 +1,6 @@
 #include <linux/module.h>
 #include <linux/list.h>
-#include "../../../drivers/target/iscsi/iscsi_target_core.h"
+#include "iscsi_target_core.h"
 
 struct iscsit_transport {
 #define ISCSIT_TRANSPORT_NAME  16
index 4a8795a87b9e99f30ee07f43fdce3984ddc658e0..672150b6aaf52bc24c640d16f1f7c841e0be655d 100644 (file)
@@ -407,7 +407,7 @@ struct t10_reservation {
        /* Activate Persistence across Target Power Loss enabled
         * for SCSI device */
        int pr_aptpl_active;
-#define PR_APTPL_BUF_LEN                       8192
+#define PR_APTPL_BUF_LEN                       262144
        u32 pr_generation;
        spinlock_t registration_lock;
        spinlock_t aptpl_reg_lock;
index 611e1c5893b490d93d0c87a1a2d383d6d8ef435d..b6dec05c7196a22511e346242724406eef88265b 100644 (file)
@@ -495,8 +495,7 @@ struct btrfs_ioctl_send_args {
 
 /* Error codes as returned by the kernel */
 enum btrfs_err_code {
-       notused,
-       BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
+       BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET = 1,
        BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
        BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
        BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
index 26386cf3db444cbca7bc9e7138f8b0e01c0669b6..aef9a81b2d75b9524000f3b1b1bf858464e575e2 100644 (file)
@@ -115,7 +115,13 @@ struct nvme_id_ns {
        __le16                  nawun;
        __le16                  nawupf;
        __le16                  nacwu;
-       __u8                    rsvd40[80];
+       __le16                  nabsn;
+       __le16                  nabo;
+       __le16                  nabspf;
+       __u16                   rsvd46;
+       __le64                  nvmcap[2];
+       __u8                    rsvd64[40];
+       __u8                    nguid[16];
        __u8                    eui64[8];
        struct nvme_lbaf        lbaf[16];
        __u8                    rsvd192[192];
@@ -124,10 +130,22 @@ struct nvme_id_ns {
 
 enum {
        NVME_NS_FEAT_THIN       = 1 << 0,
+       NVME_NS_FLBAS_LBA_MASK  = 0xf,
+       NVME_NS_FLBAS_META_EXT  = 0x10,
        NVME_LBAF_RP_BEST       = 0,
        NVME_LBAF_RP_BETTER     = 1,
        NVME_LBAF_RP_GOOD       = 2,
        NVME_LBAF_RP_DEGRADED   = 3,
+       NVME_NS_DPC_PI_LAST     = 1 << 4,
+       NVME_NS_DPC_PI_FIRST    = 1 << 3,
+       NVME_NS_DPC_PI_TYPE3    = 1 << 2,
+       NVME_NS_DPC_PI_TYPE2    = 1 << 1,
+       NVME_NS_DPC_PI_TYPE1    = 1 << 0,
+       NVME_NS_DPS_PI_FIRST    = 1 << 3,
+       NVME_NS_DPS_PI_MASK     = 0x7,
+       NVME_NS_DPS_PI_TYPE1    = 1,
+       NVME_NS_DPS_PI_TYPE2    = 2,
+       NVME_NS_DPS_PI_TYPE3    = 3,
 };
 
 struct nvme_smart_log {
@@ -261,6 +279,10 @@ enum {
        NVME_RW_DSM_LATENCY_LOW         = 3 << 4,
        NVME_RW_DSM_SEQ_REQ             = 1 << 6,
        NVME_RW_DSM_COMPRESSED          = 1 << 7,
+       NVME_RW_PRINFO_PRCHK_REF        = 1 << 10,
+       NVME_RW_PRINFO_PRCHK_APP        = 1 << 11,
+       NVME_RW_PRINFO_PRCHK_GUARD      = 1 << 12,
+       NVME_RW_PRINFO_PRACT            = 1 << 13,
 };
 
 struct nvme_dsm_cmd {
@@ -549,6 +571,8 @@ struct nvme_passthru_cmd {
        __u32   result;
 };
 
+#define NVME_VS(major, minor) (((major) << 16) | ((minor) << 8))
+
 #define nvme_admin_cmd nvme_passthru_cmd
 
 #define NVME_IOCTL_ID          _IO('N', 0x40)
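[Editor's sketch, not part of the patch] With the reworked NVME_VS() macro the low byte is reserved for a tertiary version number, so NVME_VS(1, 1) now evaluates to 0x10100 instead of 0x10001. The helpers below are hedged examples of how the new identify-namespace bits combine; the function names are made up, while flbas and dps are existing fields of struct nvme_id_ns.

#include <linux/nvme.h>

static int example_pi_type(const struct nvme_id_ns *id)
{
	/* 0 = no protection information, 1..3 = T10 PI Type 1..3 */
	return id->dps & NVME_NS_DPS_PI_MASK;
}

static int example_meta_is_extended(const struct nvme_id_ns *id)
{
	/* metadata transferred inline at the end of each LBA */
	return id->flbas & NVME_NS_FLBAS_META_EXT;
}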
index 89f63503f903dd25f6c6a99594f7ace7dc5bac68..31891d9535e2a4ede364627a805d6d346fae8b9c 100644 (file)
@@ -185,4 +185,9 @@ struct prctl_mm_map {
 #define PR_MPX_ENABLE_MANAGEMENT  43
 #define PR_MPX_DISABLE_MANAGEMENT 44
 
+#define PR_SET_FP_MODE         45
+#define PR_GET_FP_MODE         46
+# define PR_FP_MODE_FR         (1 << 0)        /* 64b FP registers */
+# define PR_FP_MODE_FRE                (1 << 1)        /* 32b compatibility */
+
 #endif /* _LINUX_PRCTL_H */
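[Editor's sketch, not part of the patch] A hedged userspace example of querying the new FP mode controls (used for MIPS FR/FRE selection); the fallback defines are only there in case the installed headers predate this patch, and error handling is minimal.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_FP_MODE
#define PR_SET_FP_MODE	45
#define PR_GET_FP_MODE	46
#define PR_FP_MODE_FR	(1 << 0)
#define PR_FP_MODE_FRE	(1 << 1)
#endif

int main(void)
{
	int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

	if (mode < 0)
		perror("PR_GET_FP_MODE");
	else
		printf("FR=%d FRE=%d\n",
		       !!(mode & PR_FP_MODE_FR), !!(mode & PR_FP_MODE_FRE));
	return 0;
}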
index 29715d27548f21b20303861c24f56faeab835cc9..82889c30f4f5a79fb820c1dd20e1e8a0f99b4bf0 100644 (file)
@@ -333,6 +333,7 @@ enum {
        VFIO_PCI_MSI_IRQ_INDEX,
        VFIO_PCI_MSIX_IRQ_INDEX,
        VFIO_PCI_ERR_IRQ_INDEX,
+       VFIO_PCI_REQ_IRQ_INDEX,
        VFIO_PCI_NUM_IRQS
 };
 
index 867cc5084afbfce8ab24cfd87d5f52cda93697de..b513e662d8e4999401f3c7366368bfb3c7900664 100644 (file)
@@ -90,6 +90,7 @@ enum {
 };
 
 enum {
+       IB_USER_VERBS_EX_CMD_QUERY_DEVICE = IB_USER_VERBS_CMD_QUERY_DEVICE,
        IB_USER_VERBS_EX_CMD_CREATE_FLOW = IB_USER_VERBS_CMD_THRESHOLD,
        IB_USER_VERBS_EX_CMD_DESTROY_FLOW,
 };
@@ -201,6 +202,28 @@ struct ib_uverbs_query_device_resp {
        __u8  reserved[4];
 };
 
+struct ib_uverbs_ex_query_device {
+       __u32 comp_mask;
+       __u32 reserved;
+};
+
+struct ib_uverbs_odp_caps {
+       __u64 general_caps;
+       struct {
+               __u32 rc_odp_caps;
+               __u32 uc_odp_caps;
+               __u32 ud_odp_caps;
+       } per_transport_caps;
+       __u32 reserved;
+};
+
+struct ib_uverbs_ex_query_device_resp {
+       struct ib_uverbs_query_device_resp base;
+       __u32 comp_mask;
+       __u32 response_length;
+       struct ib_uverbs_odp_caps odp_caps;
+};
+
 struct ib_uverbs_query_port {
        __u64 response;
        __u8  port_num;
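[Editor's sketch, not part of the patch] The extended query-device response above is forward compatible: the kernel reports how many bytes it filled in response_length, so a consumer should check that before reading the tail fields. The helper name is illustrative.

#include <stddef.h>
#include <rdma/ib_user_verbs.h>

static int example_has_odp_caps(const struct ib_uverbs_ex_query_device_resp *resp)
{
	return resp->response_length >=
	       offsetof(struct ib_uverbs_ex_query_device_resp, odp_caps) +
	       sizeof(resp->odp_caps);
}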
index 058e3671fa11ecab043f414946ae539b8fadf2ec..f5dbc6d4261bcb47e7d7dccfa6385f649ecefa0a 100644 (file)
@@ -921,7 +921,7 @@ config NUMA_BALANCING_DEFAULT_ENABLED
          machine.
 
 menuconfig CGROUPS
-       boolean "Control Group support"
+       bool "Control Group support"
        select KERNFS
        help
          This option adds support for grouping sets of processes together, for
@@ -1290,8 +1290,8 @@ endif
 config CC_OPTIMIZE_FOR_SIZE
        bool "Optimize for size"
        help
-         Enabling this option will pass "-Os" instead of "-O2" to gcc
-         resulting in a smaller kernel.
+         Enabling this option will pass "-Os" instead of "-O2" to
+         your compiler, resulting in a smaller kernel.
 
          If unsure, say N.
 
@@ -1762,7 +1762,7 @@ config SLABINFO
        default y
 
 config RT_MUTEXES
-       boolean
+       bool
 
 config BASE_SMALL
        int
index 07ce18ca71e0cd46b70155269a77b04af23f6526..0874e2edd2756bcbe5542a496fb6b131b092b3ea 100644 (file)
@@ -604,7 +604,7 @@ return_normal:
                   online_cpus)
                cpu_relax();
        if (!time_left)
-               pr_crit("KGDB: Timed out waiting for secondary CPUs.\n");
+               pr_crit("Timed out waiting for secondary CPUs.\n");
 
        /*
         * At this point the primary processor is completely
@@ -696,6 +696,14 @@ kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
 
        if (arch_kgdb_ops.enable_nmi)
                arch_kgdb_ops.enable_nmi(0);
+       /*
+        * Avoid entering the debugger if we were triggered due to an oops
+        * but panic_timeout indicates the system should automatically
+        * reboot on panic. We don't want to get stuck waiting for input
+        * on such systems, especially if it's "just" an oops.
+        */
+       if (signo != SIGTRAP && panic_timeout)
+               return 1;
 
        memset(ks, 0, sizeof(struct kgdb_state));
        ks->cpu                 = raw_smp_processor_id();
@@ -828,6 +836,15 @@ static int kgdb_panic_event(struct notifier_block *self,
                            unsigned long val,
                            void *data)
 {
+       /*
+        * Avoid entering the debugger if we were triggered due to a panic.
+        * We don't want to get stuck waiting for input from the user in
+        * such a case. panic_timeout indicates the system should
+        * automatically reboot on panic.
+        */
+       if (panic_timeout)
+               return NOTIFY_DONE;
+
        if (dbg_kdb_mode)
                kdb_printf("PANIC: %s\n", (char *)data);
        kgdb_breakpoint();
index 7c70812caea5b3223fe1a9b7d9252980f90a02fe..fc1ef736253c79954686d018a2deca4c86300fa6 100644 (file)
@@ -439,7 +439,7 @@ poll_again:
  *     substituted for %d, %x or %o in the prompt.
  */
 
-char *kdb_getstr(char *buffer, size_t bufsize, char *prompt)
+char *kdb_getstr(char *buffer, size_t bufsize, const char *prompt)
 {
        if (prompt && kdb_prompt_str != prompt)
                strncpy(kdb_prompt_str, prompt, CMD_BUFLEN);
@@ -548,7 +548,7 @@ static int kdb_search_string(char *searched, char *searchfor)
        return 0;
 }
 
-int vkdb_printf(const char *fmt, va_list ap)
+int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
 {
        int diag;
        int linecount;
@@ -680,6 +680,12 @@ int vkdb_printf(const char *fmt, va_list ap)
                        size_avail = sizeof(kdb_buffer) - len;
                        goto kdb_print_out;
                }
+               if (kdb_grepping_flag >= KDB_GREPPING_FLAG_SEARCH)
+                       /*
+                        * This was an interactive search (using '/' at the
+                        * more prompt) and it has completed. Clear the flag.
+                        */
+                       kdb_grepping_flag = 0;
                /*
                 * at this point the string is a full line and
                 * should be printed, up to the null.
@@ -691,19 +697,20 @@ kdb_printit:
         * Write to all consoles.
         */
        retlen = strlen(kdb_buffer);
+       cp = (char *) printk_skip_level(kdb_buffer);
        if (!dbg_kdb_mode && kgdb_connected) {
-               gdbstub_msg_write(kdb_buffer, retlen);
+               gdbstub_msg_write(cp, retlen - (cp - kdb_buffer));
        } else {
                if (dbg_io_ops && !dbg_io_ops->is_console) {
-                       len = retlen;
-                       cp = kdb_buffer;
+                       len = retlen - (cp - kdb_buffer);
+                       cp2 = cp;
                        while (len--) {
-                               dbg_io_ops->write_char(*cp);
-                               cp++;
+                               dbg_io_ops->write_char(*cp2);
+                               cp2++;
                        }
                }
                while (c) {
-                       c->write(c, kdb_buffer, retlen);
+                       c->write(c, cp, retlen - (cp - kdb_buffer));
                        touch_nmi_watchdog();
                        c = c->next;
                }
@@ -711,7 +718,10 @@ kdb_printit:
        if (logging) {
                saved_loglevel = console_loglevel;
                console_loglevel = CONSOLE_LOGLEVEL_SILENT;
-               printk(KERN_INFO "%s", kdb_buffer);
+               if (printk_get_level(kdb_buffer) || src == KDB_MSGSRC_PRINTK)
+                       printk("%s", kdb_buffer);
+               else
+                       pr_info("%s", kdb_buffer);
        }
 
        if (KDB_STATE(PAGER)) {
@@ -794,11 +804,23 @@ kdb_printit:
                        kdb_nextline = linecount - 1;
                        kdb_printf("\r");
                        suspend_grep = 1; /* for this recursion */
+               } else if (buf1[0] == '/' && !kdb_grepping_flag) {
+                       kdb_printf("\r");
+                       kdb_getstr(kdb_grep_string, KDB_GREP_STRLEN,
+                                  kdbgetenv("SEARCHPROMPT") ?: "search> ");
+                       *strchrnul(kdb_grep_string, '\n') = '\0';
+                       kdb_grepping_flag += KDB_GREPPING_FLAG_SEARCH;
+                       suspend_grep = 1; /* for this recursion */
                } else if (buf1[0] && buf1[0] != '\n') {
                        /* user hit something other than enter */
                        suspend_grep = 1; /* for this recursion */
-                       kdb_printf("\nOnly 'q' or 'Q' are processed at more "
-                                  "prompt, input ignored\n");
+                       if (buf1[0] != '/')
+                               kdb_printf(
+                                   "\nOnly 'q', 'Q' or '/' are processed at "
+                                   "more prompt, input ignored\n");
+                       else
+                               kdb_printf("\n'/' cannot be used during | "
+                                          "grep filtering, input ignored\n");
                } else if (kdb_grepping_flag) {
                        /* user hit enter */
                        suspend_grep = 1; /* for this recursion */
@@ -844,7 +866,7 @@ int kdb_printf(const char *fmt, ...)
        int r;
 
        va_start(ap, fmt);
-       r = vkdb_printf(fmt, ap);
+       r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap);
        va_end(ap);
 
        return r;
index 7b40c5f07dce8d09e1ebaba547e401b5655befbb..4121345498e0e48f10b414a4b12e0b5f15daeabd 100644 (file)
@@ -50,8 +50,7 @@
 static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE;
 module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600);
 
-#define GREP_LEN 256
-char kdb_grep_string[GREP_LEN];
+char kdb_grep_string[KDB_GREP_STRLEN];
 int kdb_grepping_flag;
 EXPORT_SYMBOL(kdb_grepping_flag);
 int kdb_grep_leading;
@@ -870,7 +869,7 @@ static void parse_grep(const char *str)
        len = strlen(cp);
        if (!len)
                return;
-       if (len >= GREP_LEN) {
+       if (len >= KDB_GREP_STRLEN) {
                kdb_printf("search string too long\n");
                return;
        }
@@ -915,13 +914,12 @@ int kdb_parse(const char *cmdstr)
        char *cp;
        char *cpp, quoted;
        kdbtab_t *tp;
-       int i, escaped, ignore_errors = 0, check_grep;
+       int i, escaped, ignore_errors = 0, check_grep = 0;
 
        /*
         * First tokenize the command string.
         */
        cp = (char *)cmdstr;
-       kdb_grepping_flag = check_grep = 0;
 
        if (KDB_FLAG(CMD_INTERRUPT)) {
                /* Previous command was interrupted, newline must not
@@ -1247,7 +1245,6 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
                kdb_printf("due to NonMaskable Interrupt @ "
                           kdb_machreg_fmt "\n",
                           instruction_pointer(regs));
-               kdb_dumpregs(regs);
                break;
        case KDB_REASON_SSTEP:
        case KDB_REASON_BREAK:
@@ -1281,6 +1278,9 @@ static int kdb_local(kdb_reason_t reason, int error, struct pt_regs *regs,
                 */
                kdb_nextline = 1;
                KDB_STATE_CLEAR(SUPPRESS);
+               kdb_grepping_flag = 0;
+               /* ensure the old search does not leak into '/' commands */
+               kdb_grep_string[0] = '\0';
 
                cmdbuf = cmd_cur;
                *cmdbuf = '\0';
@@ -2256,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv)
        /*
         * Validate cpunum
         */
-       if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
+       if ((cpunum >= CONFIG_NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
                return KDB_BADCPUNUM;
 
        dbg_switch_cpu = cpunum;
@@ -2583,7 +2583,7 @@ static int kdb_summary(int argc, const char **argv)
 #define K(x) ((x) << (PAGE_SHIFT - 10))
        kdb_printf("\nMemTotal:       %8lu kB\nMemFree:        %8lu kB\n"
                   "Buffers:        %8lu kB\n",
-                  val.totalram, val.freeram, val.bufferram);
+                  K(val.totalram), K(val.freeram), K(val.bufferram));
        return 0;
 }
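
Editor's note (illustration, not part of the patch): the fix above wraps the
meminfo totals in kdb's K() macro so the summary prints kilobytes instead of
raw page counts. A minimal standalone sketch of the same conversion, assuming
the common PAGE_SHIFT of 12 (4 KiB pages), is:

#include <stdio.h>

/* Sketch only: K(x) converts a page count to kB by shifting left by
 * (PAGE_SHIFT - 10); with 4 KiB pages that is a shift by 2, i.e. x * 4. */
#define PAGE_SHIFT 12
#define K(x) ((x) << (PAGE_SHIFT - 10))

int main(void)
{
	unsigned long totalram_pages = 2097152;	/* 8 GiB worth of 4 KiB pages */

	printf("MemTotal: %8lu kB\n", K(totalram_pages));	/* 8388608 kB */
	return 0;
}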
 
index eaacd1693954b13aa55c59028c597b6ed91488f3..75014d7f45681b1a75643fd06dbc119e8ea16cb7 100644 (file)
@@ -196,7 +196,9 @@ extern int kdb_main_loop(kdb_reason_t, kdb_reason_t,
 
 /* Miscellaneous functions and data areas */
 extern int kdb_grepping_flag;
+#define KDB_GREPPING_FLAG_SEARCH 0x8000
 extern char kdb_grep_string[];
+#define KDB_GREP_STRLEN 256
 extern int kdb_grep_leading;
 extern int kdb_grep_trailing;
 extern char *kdb_cmds[];
@@ -209,7 +211,7 @@ extern void kdb_ps1(const struct task_struct *p);
 extern void kdb_print_nameval(const char *name, unsigned long val);
 extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
 extern void kdb_meminfo_proc_show(void);
-extern char *kdb_getstr(char *, size_t, char *);
+extern char *kdb_getstr(char *, size_t, const char *);
 extern void kdb_gdb_state_pass(char *buf);
 
 /* Defines for kdb_symbol_print */
index 52aa7e8de92705c02c061c3ecd3087cb1061b4f8..752d6486b67e15eba9113116972a2cd0450ca0cc 100644 (file)
@@ -1,33 +1,7 @@
 ccflags-y := -DSRCTREE='"$(srctree)"' -DOBJTREE='"$(objtree)"'
 
-# if-lt
-# Usage VAR := $(call if-lt, $(a), $(b))
-# Returns 1 if (a < b)
-if-lt = $(shell [ $(1) -lt $(2) ] && echo 1)
-
-ifeq ($(CONFIG_GCOV_FORMAT_3_4),y)
-  cc-ver := 0304
-else ifeq ($(CONFIG_GCOV_FORMAT_4_7),y)
-  cc-ver := 0407
-else
-# Use cc-version if available, otherwise set 0
-#
-# scripts/Kbuild.include, which contains cc-version function, is not included
-# during make clean "make -f scripts/Makefile.clean obj=kernel/gcov"
-# Meaning cc-ver is empty causing if-lt test to fail with
-# "/bin/sh: line 0: [: -lt: unary operator expected" error mesage.
-# This has no affect on the clean phase, but the error message could be
-# confusing/annoying. So this dummy workaround sets cc-ver to zero if cc-version
-# is not available. We can probably move if-lt to Kbuild.include, so it's also
-# not defined during clean or to include Kbuild.include in
-# scripts/Makefile.clean. But the following workaround seems least invasive.
-  cc-ver := $(if $(call cc-version),$(call cc-version),0)
-endif
-
-obj-$(CONFIG_GCOV_KERNEL) := base.o fs.o
-
-ifeq ($(call if-lt, $(cc-ver), 0407),1)
-  obj-$(CONFIG_GCOV_KERNEL) += gcc_3_4.o
-else
-  obj-$(CONFIG_GCOV_KERNEL) += gcc_4_7.o
-endif
+obj-y := base.o fs.o
+obj-$(CONFIG_GCOV_FORMAT_3_4) += gcc_3_4.o
+obj-$(CONFIG_GCOV_FORMAT_4_7) += gcc_4_7.o
+obj-$(CONFIG_GCOV_FORMAT_AUTODETECT) += $(call cc-ifversion, -lt, 0407, \
+                                                       gcc_3_4.o, gcc_4_7.o)
index ff7f47d026ac48b21d6239f9db36ee8662585a45..782172f073c5ed4bde5318bf96777ee79c618b89 100644 (file)
@@ -314,12 +314,12 @@ static void notrace klp_ftrace_handler(unsigned long ip,
        rcu_read_lock();
        func = list_first_or_null_rcu(&ops->func_stack, struct klp_func,
                                      stack_node);
-       rcu_read_unlock();
-
        if (WARN_ON_ONCE(!func))
-               return;
+               goto unlock;
 
        klp_arch_set_pc(regs, (unsigned long)func->new_func);
+unlock:
+       rcu_read_unlock();
 }
 
 static int klp_disable_func(struct klp_func *func)
@@ -731,7 +731,7 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func)
        func->state = KLP_DISABLED;
 
        return kobject_init_and_add(&func->kobj, &klp_ktype_func,
-                                   obj->kobj, func->old_name);
+                                   obj->kobj, "%s", func->old_name);
 }
 
 /* parts of the initialization that is done only when the object is loaded */
@@ -807,7 +807,7 @@ static int klp_init_patch(struct klp_patch *patch)
        patch->state = KLP_DISABLED;
 
        ret = kobject_init_and_add(&patch->kobj, &klp_ktype_patch,
-                                  klp_root_kobj, patch->mod->name);
+                                  klp_root_kobj, "%s", patch->mod->name);
        if (ret)
                goto unlock;
 
index 3059bc2f022daa6e4d8d976c39a7d8a8f546d813..e16e5542bf13f381a36c304b7e6b1ff9b66aaea0 100644 (file)
@@ -1193,7 +1193,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
                ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
 
        if (unlikely(ret)) {
-               remove_waiter(lock, &waiter);
+               if (rt_mutex_has_waiters(lock))
+                       remove_waiter(lock, &waiter);
                rt_mutex_handle_deadlock(ret, chwalk, &waiter);
        }
 
index c06df7de0963a3c82889c274dd2d707507d31d50..01cfd69c54c6772ad49a1d81120258f765f435a1 100644 (file)
@@ -1811,7 +1811,7 @@ int vprintk_default(const char *fmt, va_list args)
 
 #ifdef CONFIG_KGDB_KDB
        if (unlikely(kdb_trap_printk)) {
-               r = vkdb_printf(fmt, args);
+               r = vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
                return r;
        }
 #endif
index 0d7bbe3095ad717c369b142079f16236a58e1af7..0a571e9a0f1d00868c74c5032912c1a0b24dce94 100644 (file)
@@ -326,6 +326,7 @@ void rcu_read_unlock_special(struct task_struct *t)
        special = t->rcu_read_unlock_special;
        if (special.b.need_qs) {
                rcu_preempt_qs();
+               t->rcu_read_unlock_special.b.need_qs = false;
                if (!t->rcu_read_unlock_special.s) {
                        local_irq_restore(flags);
                        return;
index 8a2e230fb86ad43e3196488961f2b71d1e8b28ed..eae160dd669d9d8d58bb911595c6391b2732edb4 100644 (file)
@@ -87,8 +87,7 @@ static inline struct autogroup *autogroup_create(void)
         * so we don't have to move tasks around upon policy change,
         * or flail around trying to allocate bandwidth on the fly.
         * A bandwidth exception in __sched_setscheduler() allows
-        * the policy change to proceed.  Thereafter, task_group()
-        * returns &root_task_group, so zero bandwidth is required.
+        * the policy change to proceed.
         */
        free_rt_sched_group(tg);
        tg->rt_se = root_task_group.rt_se;
@@ -115,9 +114,6 @@ bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
        if (tg != &root_task_group)
                return false;
 
-       if (p->sched_class != &fair_sched_class)
-               return false;
-
        /*
         * We can only assume the task group can't go away on us if
         * autogroup_move_group() can see us on ->thread_group list.
index 7052d3fd4e7bd87a29bd144cbca1086621040251..8d0f35debf35657689908a4b37df7230ba7d6710 100644 (file)
@@ -274,7 +274,7 @@ bool try_wait_for_completion(struct completion *x)
         * first without taking the lock so we can
         * return early in the blocking case.
         */
-       if (!ACCESS_ONCE(x->done))
+       if (!READ_ONCE(x->done))
                return 0;
 
        spin_lock_irqsave(&x->wait.lock, flags);
@@ -297,6 +297,21 @@ EXPORT_SYMBOL(try_wait_for_completion);
  */
 bool completion_done(struct completion *x)
 {
-       return !!ACCESS_ONCE(x->done);
+       if (!READ_ONCE(x->done))
+               return false;
+
+       /*
+        * If ->done, we need to wait for complete() to release ->wait.lock
+        * otherwise we can end up freeing the completion before complete()
+        * is done referencing it.
+        *
+        * The RMB pairs with complete()'s RELEASE of ->wait.lock and orders
+        * the loads of ->done and ->wait.lock such that we cannot observe
+        * the lock before complete() acquires it while observing the ->done
+        * after it's acquired the lock.
+        */
+       smp_rmb();
+       spin_unlock_wait(&x->wait.lock);
+       return true;
 }
 EXPORT_SYMBOL(completion_done);
index 13049aac05a6242e44a59b9ec424f1004448b3d4..f0f831e8a345d835f4cb21bf899c50ab67042b43 100644 (file)
@@ -306,66 +306,6 @@ __read_mostly int scheduler_running;
  */
 int sysctl_sched_rt_runtime = 950000;
 
-/*
- * __task_rq_lock - lock the rq @p resides on.
- */
-static inline struct rq *__task_rq_lock(struct task_struct *p)
-       __acquires(rq->lock)
-{
-       struct rq *rq;
-
-       lockdep_assert_held(&p->pi_lock);
-
-       for (;;) {
-               rq = task_rq(p);
-               raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-                       return rq;
-               raw_spin_unlock(&rq->lock);
-
-               while (unlikely(task_on_rq_migrating(p)))
-                       cpu_relax();
-       }
-}
-
-/*
- * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
- */
-static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
-       __acquires(p->pi_lock)
-       __acquires(rq->lock)
-{
-       struct rq *rq;
-
-       for (;;) {
-               raw_spin_lock_irqsave(&p->pi_lock, *flags);
-               rq = task_rq(p);
-               raw_spin_lock(&rq->lock);
-               if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
-                       return rq;
-               raw_spin_unlock(&rq->lock);
-               raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-
-               while (unlikely(task_on_rq_migrating(p)))
-                       cpu_relax();
-       }
-}
-
-static void __task_rq_unlock(struct rq *rq)
-       __releases(rq->lock)
-{
-       raw_spin_unlock(&rq->lock);
-}
-
-static inline void
-task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
-       __releases(rq->lock)
-       __releases(p->pi_lock)
-{
-       raw_spin_unlock(&rq->lock);
-       raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
-}
-
 /*
  * this_rq_lock - lock this runqueue and disable interrupts.
  */
@@ -2899,7 +2839,7 @@ void __sched schedule_preempt_disabled(void)
        preempt_disable();
 }
 
-static void preempt_schedule_common(void)
+static void __sched notrace preempt_schedule_common(void)
 {
        do {
                __preempt_count_add(PREEMPT_ACTIVE);
@@ -4418,36 +4358,29 @@ EXPORT_SYMBOL_GPL(yield_to);
  * This task is about to go to sleep on IO. Increment rq->nr_iowait so
  * that process accounting knows that this is a task in IO wait state.
  */
-void __sched io_schedule(void)
-{
-       struct rq *rq = raw_rq();
-
-       delayacct_blkio_start();
-       atomic_inc(&rq->nr_iowait);
-       blk_flush_plug(current);
-       current->in_iowait = 1;
-       schedule();
-       current->in_iowait = 0;
-       atomic_dec(&rq->nr_iowait);
-       delayacct_blkio_end();
-}
-EXPORT_SYMBOL(io_schedule);
-
 long __sched io_schedule_timeout(long timeout)
 {
-       struct rq *rq = raw_rq();
+       int old_iowait = current->in_iowait;
+       struct rq *rq;
        long ret;
 
+       current->in_iowait = 1;
+       if (old_iowait)
+               blk_schedule_flush_plug(current);
+       else
+               blk_flush_plug(current);
+
        delayacct_blkio_start();
+       rq = raw_rq();
        atomic_inc(&rq->nr_iowait);
-       blk_flush_plug(current);
-       current->in_iowait = 1;
        ret = schedule_timeout(timeout);
-       current->in_iowait = 0;
+       current->in_iowait = old_iowait;
        atomic_dec(&rq->nr_iowait);
        delayacct_blkio_end();
+
        return ret;
 }
+EXPORT_SYMBOL(io_schedule_timeout);
 
 /**
  * sys_sched_get_priority_max - return maximum RT priority.
@@ -7642,6 +7575,12 @@ static inline int tg_has_rt_tasks(struct task_group *tg)
 {
        struct task_struct *g, *p;
 
+       /*
+        * Autogroups do not have RT tasks; see autogroup_create().
+        */
+       if (task_group_is_autogroup(tg))
+               return 0;
+
        for_each_process_thread(g, p) {
                if (rt_task(p) && task_group(p) == tg)
                        return 1;
@@ -7734,6 +7673,17 @@ static int tg_set_rt_bandwidth(struct task_group *tg,
 {
        int i, err = 0;
 
+       /*
+        * Disallowing the root group's RT runtime is BAD; it would prevent
+        * the kernel from creating (and/or operating) RT threads.
+        */
+       if (tg == &root_task_group && rt_runtime == 0)
+               return -EINVAL;
+
+       /* A zero period doesn't make any sense. */
+       if (rt_period == 0)
+               return -EINVAL;
+
        mutex_lock(&rt_constraints_mutex);
        read_lock(&tasklist_lock);
        err = __rt_schedulable(tg, rt_period, rt_runtime);
@@ -7790,9 +7740,6 @@ static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
        rt_period = (u64)rt_period_us * NSEC_PER_USEC;
        rt_runtime = tg->rt_bandwidth.rt_runtime;
 
-       if (rt_period == 0)
-               return -EINVAL;
-
        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
 }
 
index a027799ae130d3623ff4351f08c3cf456979bfbc..3fa8fa6d940300c1fbae721503aad2666f72b4e5 100644 (file)
@@ -511,16 +511,10 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
                                                     struct sched_dl_entity,
                                                     dl_timer);
        struct task_struct *p = dl_task_of(dl_se);
+       unsigned long flags;
        struct rq *rq;
-again:
-       rq = task_rq(p);
-       raw_spin_lock(&rq->lock);
 
-       if (rq != task_rq(p)) {
-               /* Task was moved, retrying. */
-               raw_spin_unlock(&rq->lock);
-               goto again;
-       }
+       rq = task_rq_lock(current, &flags);
 
        /*
         * We need to take care of several possible races here:
@@ -541,6 +535,26 @@ again:
 
        sched_clock_tick();
        update_rq_clock(rq);
+
+       /*
+        * If the throttle happened during sched-out; like:
+        *
+        *   schedule()
+        *     deactivate_task()
+        *       dequeue_task_dl()
+        *         update_curr_dl()
+        *           start_dl_timer()
+        *         __dequeue_task_dl()
+        *     prev->on_rq = 0;
+        *
+        * We can be both throttled and !queued. Replenish the counter
+        * but do not enqueue -- wait for our wakeup to do that.
+        */
+       if (!task_on_rq_queued(p)) {
+               replenish_dl_entity(dl_se, dl_se);
+               goto unlock;
+       }
+
        enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
        if (dl_task(rq->curr))
                check_preempt_curr_dl(rq, p, 0);
@@ -555,7 +569,7 @@ again:
                push_dl_task(rq);
 #endif
 unlock:
-       raw_spin_unlock(&rq->lock);
+       task_rq_unlock(rq, current, &flags);
 
        return HRTIMER_NORESTART;
 }
@@ -898,6 +912,7 @@ static void yield_task_dl(struct rq *rq)
                rq->curr->dl.dl_yielded = 1;
                p->dl.runtime = 0;
        }
+       update_rq_clock(rq);
        update_curr_dl(rq);
 }
 
index 0870db23d79cb3c0578b4f4b3450f5dad02c929e..dc0f435a27794657258623ac8a7f53f7326ff7ac 100644 (file)
@@ -1380,6 +1380,82 @@ static inline void sched_avg_update(struct rq *rq) { }
 
 extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
 
+/*
+ * __task_rq_lock - lock the rq @p resides on.
+ */
+static inline struct rq *__task_rq_lock(struct task_struct *p)
+       __acquires(rq->lock)
+{
+       struct rq *rq;
+
+       lockdep_assert_held(&p->pi_lock);
+
+       for (;;) {
+               rq = task_rq(p);
+               raw_spin_lock(&rq->lock);
+               if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+                       return rq;
+               raw_spin_unlock(&rq->lock);
+
+               while (unlikely(task_on_rq_migrating(p)))
+                       cpu_relax();
+       }
+}
+
+/*
+ * task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
+ */
+static inline struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
+       __acquires(p->pi_lock)
+       __acquires(rq->lock)
+{
+       struct rq *rq;
+
+       for (;;) {
+               raw_spin_lock_irqsave(&p->pi_lock, *flags);
+               rq = task_rq(p);
+               raw_spin_lock(&rq->lock);
+               /*
+                *      move_queued_task()              task_rq_lock()
+                *
+                *      ACQUIRE (rq->lock)
+                *      [S] ->on_rq = MIGRATING         [L] rq = task_rq()
+                *      WMB (__set_task_cpu())          ACQUIRE (rq->lock);
+                *      [S] ->cpu = new_cpu             [L] task_rq()
+                *                                      [L] ->on_rq
+                *      RELEASE (rq->lock)
+                *
+                * If we observe the old cpu in task_rq_lock, the acquire of
+                * the old rq->lock will fully serialize against the stores.
+                *
+                * If we observe the new cpu in task_rq_lock, the acquire will
+                * pair with the WMB to ensure we must then also see migrating.
+                */
+               if (likely(rq == task_rq(p) && !task_on_rq_migrating(p)))
+                       return rq;
+               raw_spin_unlock(&rq->lock);
+               raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+
+               while (unlikely(task_on_rq_migrating(p)))
+                       cpu_relax();
+       }
+}
+
+static inline void __task_rq_unlock(struct rq *rq)
+       __releases(rq->lock)
+{
+       raw_spin_unlock(&rq->lock);
+}
+
+static inline void
+task_rq_unlock(struct rq *rq, struct task_struct *p, unsigned long *flags)
+       __releases(rq->lock)
+       __releases(p->pi_lock)
+{
+       raw_spin_unlock(&rq->lock);
+       raw_spin_unlock_irqrestore(&p->pi_lock, *flags);
+}
+
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PREEMPT
 
index ea9c881098941ecd9bbb42fa69a3ddb958d2ec41..667b2e62fad25bffd03109b4468fd3cbd4af00bf 100644 (file)
 #ifndef MPX_DISABLE_MANAGEMENT
 # define MPX_DISABLE_MANAGEMENT(a)     (-EINVAL)
 #endif
+#ifndef GET_FP_MODE
+# define GET_FP_MODE(a)                (-EINVAL)
+#endif
+#ifndef SET_FP_MODE
+# define SET_FP_MODE(a,b)      (-EINVAL)
+#endif
 
 /*
  * this is where the system-wide overflow UID and GID are defined, for
@@ -2219,6 +2225,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
                        return -EINVAL;
                error = MPX_DISABLE_MANAGEMENT(me);
                break;
+       case PR_SET_FP_MODE:
+               error = SET_FP_MODE(me, arg2);
+               break;
+       case PR_GET_FP_MODE:
+               error = GET_FP_MODE(me);
+               break;
        default:
                error = -EINVAL;
                break;
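
Editor's note (illustration, not part of the patch): the two new prctl options
above return -EINVAL unless the architecture provides GET_FP_MODE/SET_FP_MODE
hooks. A hypothetical userspace probe, assuming uapi headers new enough to
define PR_GET_FP_MODE, could look like this:

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
#ifdef PR_GET_FP_MODE
	/* Returns the current FP mode, or -1 with errno set to EINVAL on
	 * architectures that do not implement the hook. */
	int mode = prctl(PR_GET_FP_MODE, 0, 0, 0, 0);

	if (mode < 0)
		perror("prctl(PR_GET_FP_MODE)");
	else
		printf("FP mode: %d\n", mode);
#else
	puts("PR_GET_FP_MODE is not defined by these headers");
#endif
	return 0;
}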
index 4b585e0fdd22e16288f688baa1051395836461d5..0f60b08a4f073e9246ced1dc3b5de5f50efd7cf4 100644 (file)
@@ -633,10 +633,14 @@ int ntp_validate_timex(struct timex *txc)
        if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
                return -EPERM;
 
-       if (txc->modes & ADJ_FREQUENCY) {
-               if (LONG_MIN / PPM_SCALE > txc->freq)
+       /*
+        * Check for potential multiplication overflows that can
+        * only happen on 64-bit systems:
+        */
+       if ((txc->modes & ADJ_FREQUENCY) && (BITS_PER_LONG == 64)) {
+               if (LLONG_MIN / PPM_SCALE > txc->freq)
                        return -EINVAL;
-               if (LONG_MAX / PPM_SCALE < txc->freq)
+               if (LLONG_MAX / PPM_SCALE < txc->freq)
                        return -EINVAL;
        }
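
Editor's note (illustration, not part of the patch): the hunk above rejects
txc->freq values whose later multiplication by PPM_SCALE would overflow a
64-bit value, using the divide-first idiom so the product is never actually
formed. A standalone sketch of that idiom, with a made-up scale factor, is:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Sketch only: for b > 0, a * b overflows a long long exactly when a lies
 * outside [LLONG_MIN / b, LLONG_MAX / b], so the bounds can be checked
 * without computing the product. */
static bool mul_would_overflow(long long a, long long b)
{
	return a > LLONG_MAX / b || a < LLONG_MIN / b;
}

int main(void)
{
	long long scale = 65536;	/* stand-in for a PPM_SCALE-like factor */

	printf("%d\n", mul_would_overflow(1LL << 50, scale));	/* 1: rejected */
	printf("%d\n", mul_would_overflow(12345, scale));	/* 0: accepted */
	return 0;
}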
 
index cb9758e0ba0cd42d43cb8e7d6f86810006b80265..87da53bb1fefd6fc00c9adf31f99716852771036 100644 (file)
@@ -23,7 +23,7 @@ config HAVE_ARCH_BITREVERSE
          have this capability.
 
 config RATIONAL
-       boolean
+       bool
 
 config GENERIC_STRNCPY_FROM_USER
        bool
@@ -48,14 +48,14 @@ config GENERIC_IOMAP
        select GENERIC_PCI_IOMAP
 
 config GENERIC_IO
-       boolean
+       bool
        default n
 
 config STMP_DEVICE
        bool
 
 config PERCPU_RWSEM
-       boolean
+       bool
 
 config ARCH_USE_CMPXCHG_LOCKREF
        bool
@@ -266,7 +266,7 @@ config DECOMPRESS_LZ4
 # Generic allocator support is selected if needed
 #
 config GENERIC_ALLOCATOR
-       boolean
+       bool
 
 #
 # reed solomon support is select'ed if needed
@@ -275,16 +275,16 @@ config REED_SOLOMON
        tristate
        
 config REED_SOLOMON_ENC8
-       boolean
+       bool
 
 config REED_SOLOMON_DEC8
-       boolean
+       bool
 
 config REED_SOLOMON_ENC16
-       boolean
+       bool
 
 config REED_SOLOMON_DEC16
-       boolean
+       bool
 
 #
 # BCH support is selected if needed
@@ -293,7 +293,7 @@ config BCH
        tristate
 
 config BCH_CONST_PARAMS
-       boolean
+       bool
        help
          Drivers may select this option to force specific constant
          values for parameters 'm' (Galois field order) and 't'
@@ -329,7 +329,7 @@ config BCH_CONST_T
 # Textsearch support is select'ed if needed
 #
 config TEXTSEARCH
-       boolean
+       bool
 
 config TEXTSEARCH_KMP
        tristate
@@ -341,10 +341,10 @@ config TEXTSEARCH_FSM
        tristate
 
 config BTREE
-       boolean
+       bool
 
 config INTERVAL_TREE
-       boolean
+       bool
        help
          Simple, embeddable, interval-tree. Can find the start of an
          overlapping range in log(n) time and then iterate over all
@@ -372,18 +372,18 @@ config ASSOCIATIVE_ARRAY
          for more information.
 
 config HAS_IOMEM
-       boolean
+       bool
        depends on !NO_IOMEM
        select GENERIC_IO
        default y
 
 config HAS_IOPORT_MAP
-       boolean
+       bool
        depends on HAS_IOMEM && !NO_IOPORT_MAP
        default y
 
 config HAS_DMA
-       boolean
+       bool
        depends on !NO_DMA
        default y
 
index de5239c152f9fb8ea9f66361fa7fba7cdc1d00d9..a03131b6ba8e7877b537a77fa0c21f5cd005e275 100644 (file)
@@ -129,28 +129,28 @@ config SPARSEMEM_VMEMMAP
         efficient option when sufficient kernel resources are available.
 
 config HAVE_MEMBLOCK
-       boolean
+       bool
 
 config HAVE_MEMBLOCK_NODE_MAP
-       boolean
+       bool
 
 config HAVE_MEMBLOCK_PHYS_MAP
-       boolean
+       bool
 
 config HAVE_GENERIC_RCU_GUP
-       boolean
+       bool
 
 config ARCH_DISCARD_MEMBLOCK
-       boolean
+       bool
 
 config NO_BOOTMEM
-       boolean
+       bool
 
 config MEMORY_ISOLATION
-       boolean
+       bool
 
 config MOVABLE_NODE
-       boolean "Enable to assign a node which has only movable memory"
+       bool "Enable to assign a node which has only movable memory"
        depends on HAVE_MEMBLOCK
        depends on NO_BOOTMEM
        depends on X86_64
@@ -228,12 +228,12 @@ config SPLIT_PTLOCK_CPUS
        default "4"
 
 config ARCH_ENABLE_SPLIT_PMD_PTLOCK
-       boolean
+       bool
 
 #
 # support for memory balloon
 config MEMORY_BALLOON
-       boolean
+       bool
 
 #
 # support for memory balloon compaction
@@ -276,7 +276,7 @@ config MIGRATION
          allocation instead of reclaiming.
 
 config ARCH_ENABLE_HUGEPAGE_MIGRATION
-       boolean
+       bool
 
 config PHYS_ADDR_T_64BIT
        def_bool 64BIT || ARCH_PHYS_ADDR_T_64BIT
index a63031fa3e0c1e4380e6937aa711df912c9a687f..2f17cb5f00a43f87b7868a2a2192dd911f4501bd 100644 (file)
@@ -2319,8 +2319,8 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
 
 static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry)
 {
-       bool old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
-       bool new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode);
+       bool old_is_dir = d_is_dir(old_dentry);
+       bool new_is_dir = d_is_dir(new_dentry);
 
        if (old_dir != new_dir && old_is_dir != new_is_dir) {
                if (old_is_dir) {
index ff9ffc17fa0e1fc438e9e4ebec20376ddd6ec969..44dd5786ee91da16ae920d3c9f62b1f4bac353c8 100644 (file)
@@ -231,18 +231,18 @@ source "net/hsr/Kconfig"
 source "net/switchdev/Kconfig"
 
 config RPS
-       boolean
+       bool
        depends on SMP && SYSFS
        default y
 
 config RFS_ACCEL
-       boolean
+       bool
        depends on RPS
        select CPU_RMAP
        default y
 
 config XPS
-       boolean
+       bool
        depends on SMP
        default y
 
@@ -254,18 +254,18 @@ config CGROUP_NET_PRIO
          a per-interface basis.
 
 config CGROUP_NET_CLASSID
-       boolean "Network classid cgroup"
+       bool "Network classid cgroup"
        depends on CGROUPS
        ---help---
          Cgroup subsystem for use as general purpose socket classid marker that is
          being used in cls_cgroup and for netfilter matching.
 
 config NET_RX_BUSY_POLL
-       boolean
+       bool
        default y
 
 config BQL
-       boolean
+       bool
        depends on SYSFS
        select DQL
        default y
@@ -282,7 +282,7 @@ config BPF_JIT
          this feature changing /proc/sys/net/core/bpf_jit_enable
 
 config NET_FLOW_LIMIT
-       boolean
+       bool
        depends on RPS
        default y
        ---help---
index 5d5ab67f516dfa16ee5d86d6c312cf0a201bc3a4..ec565508e904113e65329b89dec5952bf5d41075 100644 (file)
@@ -239,6 +239,8 @@ enum {
        Opt_nocrc,
        Opt_cephx_require_signatures,
        Opt_nocephx_require_signatures,
+       Opt_tcp_nodelay,
+       Opt_notcp_nodelay,
 };
 
 static match_table_t opt_tokens = {
@@ -259,6 +261,8 @@ static match_table_t opt_tokens = {
        {Opt_nocrc, "nocrc"},
        {Opt_cephx_require_signatures, "cephx_require_signatures"},
        {Opt_nocephx_require_signatures, "nocephx_require_signatures"},
+       {Opt_tcp_nodelay, "tcp_nodelay"},
+       {Opt_notcp_nodelay, "notcp_nodelay"},
        {-1, NULL}
 };
 
@@ -457,6 +461,7 @@ ceph_parse_options(char *options, const char *dev_name,
                case Opt_nocrc:
                        opt->flags |= CEPH_OPT_NOCRC;
                        break;
+
                case Opt_cephx_require_signatures:
                        opt->flags &= ~CEPH_OPT_NOMSGAUTH;
                        break;
@@ -464,6 +469,13 @@ ceph_parse_options(char *options, const char *dev_name,
                        opt->flags |= CEPH_OPT_NOMSGAUTH;
                        break;
 
+               case Opt_tcp_nodelay:
+                       opt->flags |= CEPH_OPT_TCP_NODELAY;
+                       break;
+               case Opt_notcp_nodelay:
+                       opt->flags &= ~CEPH_OPT_TCP_NODELAY;
+                       break;
+
                default:
                        BUG_ON(token);
                }
@@ -518,10 +530,12 @@ struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private,
        /* msgr */
        if (ceph_test_opt(client, MYIP))
                myaddr = &client->options->my_addr;
+
        ceph_messenger_init(&client->msgr, myaddr,
                client->supported_features,
                client->required_features,
-               ceph_test_opt(client, NOCRC));
+               ceph_test_opt(client, NOCRC),
+               ceph_test_opt(client, TCP_NODELAY));
 
        /* subsystems */
        err = ceph_monc_init(&client->monc, client);
index 30560202f57b481fcee064a242ca13b55cd16f11..139a9cb19b0c6ca9b07e1184241c33b08cdd141f 100644 (file)
@@ -42,17 +42,3 @@ const char *ceph_osd_state_name(int s)
                return "???";
        }
 }
-
-const char *ceph_pool_op_name(int op)
-{
-       switch (op) {
-       case POOL_OP_CREATE: return "create";
-       case POOL_OP_DELETE: return "delete";
-       case POOL_OP_AUID_CHANGE: return "auid change";
-       case POOL_OP_CREATE_SNAP: return "create snap";
-       case POOL_OP_DELETE_SNAP: return "delete snap";
-       case POOL_OP_CREATE_UNMANAGED_SNAP: return "create unmanaged snap";
-       case POOL_OP_DELETE_UNMANAGED_SNAP: return "delete unmanaged snap";
-       }
-       return "???";
-}
index d2d525529f8770452412e21b51cd55b3559e6192..14d9995097cc84ea33f8c6811be18bc3919fcbbd 100644 (file)
@@ -127,8 +127,6 @@ static int monc_show(struct seq_file *s, void *p)
                op = le16_to_cpu(req->request->hdr.type);
                if (op == CEPH_MSG_STATFS)
                        seq_printf(s, "%llu statfs\n", req->tid);
-               else if (op == CEPH_MSG_POOLOP)
-                       seq_printf(s, "%llu poolop\n", req->tid);
                else if (op == CEPH_MSG_MON_GET_VERSION)
                        seq_printf(s, "%llu mon_get_version", req->tid);
                else
index 33a2f201e460e1585f82db94c8f3f90f01bbacdf..6b3f54ed65ba6fc4ff392877f662ef5dddeb8939 100644 (file)
@@ -510,6 +510,16 @@ static int ceph_tcp_connect(struct ceph_connection *con)
                return ret;
        }
 
+       if (con->msgr->tcp_nodelay) {
+               int optval = 1;
+
+               ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
+                                       (char *)&optval, sizeof(optval));
+               if (ret)
+                       pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d",
+                              ret);
+       }
+
        sk_set_memalloc(sock->sk);
 
        con->sock = sock;
@@ -2922,7 +2932,8 @@ void ceph_messenger_init(struct ceph_messenger *msgr,
                        struct ceph_entity_addr *myaddr,
                        u64 supported_features,
                        u64 required_features,
-                       bool nocrc)
+                       bool nocrc,
+                       bool tcp_nodelay)
 {
        msgr->supported_features = supported_features;
        msgr->required_features = required_features;
@@ -2937,6 +2948,7 @@ void ceph_messenger_init(struct ceph_messenger *msgr,
        get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
        encode_my_addr(msgr);
        msgr->nocrc = nocrc;
+       msgr->tcp_nodelay = tcp_nodelay;
 
        atomic_set(&msgr->stopping, 0);
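
Editor's note (illustration, not part of the patch): the new tcp_nodelay option
wired up above disables Nagle's algorithm on the messenger socket through
kernel_setsockopt(). The ordinary userspace equivalent, shown only for
orientation, is a plain setsockopt() call:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int optval = 1;

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	/* Disable Nagle's algorithm so small writes go out immediately. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &optval, sizeof(optval)))
		perror("setsockopt(TCP_NODELAY)");
	return 0;
}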
 
index f2148e22b14897727faeba297e045f2b933a52b1..2b3cf05e87b0fc44a150f1c314f2db98f1ab7dfc 100644 (file)
@@ -410,7 +410,7 @@ out_unlocked:
 }
 
 /*
- * generic requests (e.g., statfs, poolop)
+ * generic requests (currently statfs, mon_get_version)
  */
 static struct ceph_mon_generic_request *__lookup_generic_req(
        struct ceph_mon_client *monc, u64 tid)
@@ -569,7 +569,7 @@ static void handle_statfs_reply(struct ceph_mon_client *monc,
        return;
 
 bad:
-       pr_err("corrupt generic reply, tid %llu\n", tid);
+       pr_err("corrupt statfs reply, tid %llu\n", tid);
        ceph_msg_dump(msg);
 }
 
@@ -588,7 +588,6 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
 
        kref_init(&req->kref);
        req->buf = buf;
-       req->buf_len = sizeof(*buf);
        init_completion(&req->completion);
 
        err = -ENOMEM;
@@ -611,7 +610,7 @@ int ceph_monc_do_statfs(struct ceph_mon_client *monc, struct ceph_statfs *buf)
        err = do_generic_request(monc, req);
 
 out:
-       kref_put(&req->kref, release_generic_request);
+       put_generic_request(req);
        return err;
 }
 EXPORT_SYMBOL(ceph_monc_do_statfs);
@@ -647,7 +646,7 @@ static void handle_get_version_reply(struct ceph_mon_client *monc,
 
        return;
 bad:
-       pr_err("corrupt mon_get_version reply\n");
+       pr_err("corrupt mon_get_version reply, tid %llu\n", tid);
        ceph_msg_dump(msg);
 }
 
@@ -670,7 +669,6 @@ int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what,
 
        kref_init(&req->kref);
        req->buf = newest;
-       req->buf_len = sizeof(*newest);
        init_completion(&req->completion);
 
        req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION,
@@ -701,133 +699,11 @@ int ceph_monc_do_get_version(struct ceph_mon_client *monc, const char *what,
 
        mutex_unlock(&monc->mutex);
 out:
-       kref_put(&req->kref, release_generic_request);
+       put_generic_request(req);
        return err;
 }
 EXPORT_SYMBOL(ceph_monc_do_get_version);
 
-/*
- * pool ops
- */
-static int get_poolop_reply_buf(const char *src, size_t src_len,
-                               char *dst, size_t dst_len)
-{
-       u32 buf_len;
-
-       if (src_len != sizeof(u32) + dst_len)
-               return -EINVAL;
-
-       buf_len = le32_to_cpu(*(__le32 *)src);
-       if (buf_len != dst_len)
-               return -EINVAL;
-
-       memcpy(dst, src + sizeof(u32), dst_len);
-       return 0;
-}
-
-static void handle_poolop_reply(struct ceph_mon_client *monc,
-                               struct ceph_msg *msg)
-{
-       struct ceph_mon_generic_request *req;
-       struct ceph_mon_poolop_reply *reply = msg->front.iov_base;
-       u64 tid = le64_to_cpu(msg->hdr.tid);
-
-       if (msg->front.iov_len < sizeof(*reply))
-               goto bad;
-       dout("handle_poolop_reply %p tid %llu\n", msg, tid);
-
-       mutex_lock(&monc->mutex);
-       req = __lookup_generic_req(monc, tid);
-       if (req) {
-               if (req->buf_len &&
-                   get_poolop_reply_buf(msg->front.iov_base + sizeof(*reply),
-                                    msg->front.iov_len - sizeof(*reply),
-                                    req->buf, req->buf_len) < 0) {
-                       mutex_unlock(&monc->mutex);
-                       goto bad;
-               }
-               req->result = le32_to_cpu(reply->reply_code);
-               get_generic_request(req);
-       }
-       mutex_unlock(&monc->mutex);
-       if (req) {
-               complete(&req->completion);
-               put_generic_request(req);
-       }
-       return;
-
-bad:
-       pr_err("corrupt generic reply, tid %llu\n", tid);
-       ceph_msg_dump(msg);
-}
-
-/*
- * Do a synchronous pool op.
- */
-static int do_poolop(struct ceph_mon_client *monc, u32 op,
-                       u32 pool, u64 snapid,
-                       char *buf, int len)
-{
-       struct ceph_mon_generic_request *req;
-       struct ceph_mon_poolop *h;
-       int err;
-
-       req = kzalloc(sizeof(*req), GFP_NOFS);
-       if (!req)
-               return -ENOMEM;
-
-       kref_init(&req->kref);
-       req->buf = buf;
-       req->buf_len = len;
-       init_completion(&req->completion);
-
-       err = -ENOMEM;
-       req->request = ceph_msg_new(CEPH_MSG_POOLOP, sizeof(*h), GFP_NOFS,
-                                   true);
-       if (!req->request)
-               goto out;
-       req->reply = ceph_msg_new(CEPH_MSG_POOLOP_REPLY, 1024, GFP_NOFS,
-                                 true);
-       if (!req->reply)
-               goto out;
-
-       /* fill out request */
-       req->request->hdr.version = cpu_to_le16(2);
-       h = req->request->front.iov_base;
-       h->monhdr.have_version = 0;
-       h->monhdr.session_mon = cpu_to_le16(-1);
-       h->monhdr.session_mon_tid = 0;
-       h->fsid = monc->monmap->fsid;
-       h->pool = cpu_to_le32(pool);
-       h->op = cpu_to_le32(op);
-       h->auid = 0;
-       h->snapid = cpu_to_le64(snapid);
-       h->name_len = 0;
-
-       err = do_generic_request(monc, req);
-
-out:
-       kref_put(&req->kref, release_generic_request);
-       return err;
-}
-
-int ceph_monc_create_snapid(struct ceph_mon_client *monc,
-                           u32 pool, u64 *snapid)
-{
-       return do_poolop(monc,  POOL_OP_CREATE_UNMANAGED_SNAP,
-                                  pool, 0, (char *)snapid, sizeof(*snapid));
-
-}
-EXPORT_SYMBOL(ceph_monc_create_snapid);
-
-int ceph_monc_delete_snapid(struct ceph_mon_client *monc,
-                           u32 pool, u64 snapid)
-{
-       return do_poolop(monc,  POOL_OP_CREATE_UNMANAGED_SNAP,
-                                  pool, snapid, NULL, 0);
-
-}
-
 /*
  * Resend pending generic requests.
  */
@@ -1112,10 +988,6 @@ static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
                handle_get_version_reply(monc, msg);
                break;
 
-       case CEPH_MSG_POOLOP_REPLY:
-               handle_poolop_reply(monc, msg);
-               break;
-
        case CEPH_MSG_MON_MAP:
                ceph_monc_handle_map(monc, msg);
                break;
@@ -1154,7 +1026,6 @@ static struct ceph_msg *mon_alloc_msg(struct ceph_connection *con,
        case CEPH_MSG_MON_SUBSCRIBE_ACK:
                m = ceph_msg_get(monc->m_subscribe_ack);
                break;
-       case CEPH_MSG_POOLOP_REPLY:
        case CEPH_MSG_STATFS_REPLY:
                return get_generic_reply(con, hdr, skip);
        case CEPH_MSG_AUTH_REPLY:
index 53299c7b0ca4a516ba48a7c886b8b9539bb29518..41a4abc7e98eebfd36487d6d381f680732d4cd68 100644 (file)
@@ -1035,10 +1035,11 @@ static void put_osd(struct ceph_osd *osd)
 {
        dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
             atomic_read(&osd->o_ref) - 1);
-       if (atomic_dec_and_test(&osd->o_ref) && osd->o_auth.authorizer) {
+       if (atomic_dec_and_test(&osd->o_ref)) {
                struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
 
-               ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
+               if (osd->o_auth.authorizer)
+                       ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
                kfree(osd);
        }
 }
@@ -1048,14 +1049,24 @@ static void put_osd(struct ceph_osd *osd)
  */
 static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
 {
-       dout("__remove_osd %p\n", osd);
+       dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
        WARN_ON(!list_empty(&osd->o_requests));
        WARN_ON(!list_empty(&osd->o_linger_requests));
 
-       rb_erase(&osd->o_node, &osdc->osds);
        list_del_init(&osd->o_osd_lru);
-       ceph_con_close(&osd->o_con);
-       put_osd(osd);
+       rb_erase(&osd->o_node, &osdc->osds);
+       RB_CLEAR_NODE(&osd->o_node);
+}
+
+static void remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
+{
+       dout("%s %p osd%d\n", __func__, osd, osd->o_osd);
+
+       if (!RB_EMPTY_NODE(&osd->o_node)) {
+               ceph_con_close(&osd->o_con);
+               __remove_osd(osdc, osd);
+               put_osd(osd);
+       }
 }
 
 static void remove_all_osds(struct ceph_osd_client *osdc)
@@ -1065,7 +1076,7 @@ static void remove_all_osds(struct ceph_osd_client *osdc)
        while (!RB_EMPTY_ROOT(&osdc->osds)) {
                struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
                                                struct ceph_osd, o_node);
-               __remove_osd(osdc, osd);
+               remove_osd(osdc, osd);
        }
        mutex_unlock(&osdc->request_mutex);
 }
@@ -1106,7 +1117,7 @@ static void remove_old_osds(struct ceph_osd_client *osdc)
        list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
                if (time_before(jiffies, osd->lru_ttl))
                        break;
-               __remove_osd(osdc, osd);
+               remove_osd(osdc, osd);
        }
        mutex_unlock(&osdc->request_mutex);
 }
@@ -1121,8 +1132,7 @@ static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
        dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
        if (list_empty(&osd->o_requests) &&
            list_empty(&osd->o_linger_requests)) {
-               __remove_osd(osdc, osd);
-
+               remove_osd(osdc, osd);
                return -ENODEV;
        }
 
@@ -1926,6 +1936,7 @@ static void reset_changed_osds(struct ceph_osd_client *osdc)
 {
        struct rb_node *p, *n;
 
+       dout("%s %p\n", __func__, osdc);
        for (p = rb_first(&osdc->osds); p; p = n) {
                struct ceph_osd *osd = rb_entry(p, struct ceph_osd, o_node);
 
index 899d0319f2b273e47a73efe4caac7c14bfdfe219..2274e723a3df6fdf393543281cd56bcb6284b41c 100644 (file)
@@ -348,7 +348,7 @@ config NET_SCH_PLUG
 comment "Classification"
 
 config NET_CLS
-       boolean
+       bool
 
 config NET_CLS_BASIC
        tristate "Elementary classification (BASIC)"
index 651f49ab601fbdd75eed30ddc0a29c9cb4369e54..9dd0ea8db463acc9daba0c51be89b1f17ec8f17d 100644 (file)
@@ -309,12 +309,15 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
        struct rpc_xprt *xprt = req->rq_xprt;
        struct svc_serv *bc_serv = xprt->bc_serv;
 
+       spin_lock(&xprt->bc_pa_lock);
+       list_del(&req->rq_bc_pa_list);
+       spin_unlock(&xprt->bc_pa_lock);
+
        req->rq_private_buf.len = copied;
        set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
 
        dprintk("RPC:       add callback request to list\n");
        spin_lock(&bc_serv->sv_cb_lock);
-       list_del(&req->rq_bc_pa_list);
        list_add(&req->rq_bc_list, &bc_serv->sv_cb_list);
        wake_up(&bc_serv->sv_cb_waitq);
        spin_unlock(&bc_serv->sv_cb_lock);
index 155754588fd65ab656736ab342a4851b4de0bb8b..86a47e17cfaf7cba058a0ed49512954789b9d9a0 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 config NET_SWITCHDEV
-       boolean "Switch (and switch-ish) device support (EXPERIMENTAL)"
+       bool "Switch (and switch-ish) device support (EXPERIMENTAL)"
        depends on INET
        ---help---
          This module provides glue between core networking code and device
index edd2794569db96a052579b3700b30ac9335510a4..d3437b82ac256cb7bca2527f0cfe43e07f66a1cb 100644 (file)
@@ -129,17 +129,15 @@ cc-disable-warning = $(call try-run,\
        $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) -W$(strip $(1)) -c -x c /dev/null -o "$$TMP",-Wno-$(strip $(1)))
 
 # cc-version
-# Usage gcc-ver := $(call cc-version)
 cc-version = $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-version.sh $(CC))
 
 # cc-fullversion
-# Usage gcc-ver := $(call cc-fullversion)
 cc-fullversion = $(shell $(CONFIG_SHELL) \
        $(srctree)/scripts/gcc-version.sh -p $(CC))
 
 # cc-ifversion
 # Usage:  EXTRA_CFLAGS += $(call cc-ifversion, -lt, 0402, -O1)
-cc-ifversion = $(shell [ $(call cc-version, $(CC)) $(1) $(2) ] && echo $(3))
+cc-ifversion = $(shell [ $(cc-version) $(1) $(2) ] && echo $(3) || echo $(4))
 
 # cc-ldoption
 # Usage: ldflags += $(call cc-ldoption, -Wl$(comma)--hash-style=both)
@@ -157,13 +155,12 @@ ld-option = $(call try-run,\
 ar-option = $(call try-run, $(AR) rc$(1) "$$TMP",$(1),$(2))
 
 # ld-version
-# Usage: $(call ld-version)
 # Note this is mainly for HJ Lu's 3 number binutil versions
 ld-version = $(shell $(LD) --version | $(srctree)/scripts/ld-version.sh)
 
 # ld-ifversion
 # Usage:  $(call ld-ifversion, -ge, 22252, y)
-ld-ifversion = $(shell [ $(call ld-version) $(1) $(2) ] && echo $(3))
+ld-ifversion = $(shell [ $(ld-version) $(1) $(2) ] && echo $(3) || echo $(4))
 
 ######
 
index 627f8cbbedb88ca29667bbf1f88eb2004d5ee461..55c96cb8070f1130352ab85f77440dfd497d5677 100644 (file)
@@ -70,9 +70,6 @@ ifneq ($(strip $(__clean-files)),)
 endif
 ifneq ($(strip $(__clean-dirs)),)
        +$(call cmd,cleandir)
-endif
-ifneq ($(strip $(clean-rule)),)
-       +$(clean-rule)
 endif
        @:
 
index f88d90f20228e8783b5ca39accc436140af7b3ff..28df18dd1147f60b555fea428afe42513b592410 100644 (file)
@@ -59,6 +59,7 @@ static void conf_message(const char *fmt, ...)
        va_start(ap, fmt);
        if (conf_message_callback)
                conf_message_callback(fmt, ap);
+       va_end(ap);
 }
 
 const char *conf_get_configname(void)
index 81b0c61bb9e2060c18fb9dd315e8a9e0ae8a195a..2ab91b9b100dc6f6cfba663d05580e14930a3def 100755 (executable)
@@ -77,6 +77,11 @@ while true; do
        esac
 done
 
+if [ "$#" -lt 2 ] ; then
+       usage
+       exit
+fi
+
 INITFILE=$1
 shift;
 
index 59726243c2ebab1a9263019a5ff8eb759b2fdda7..88dbf23b697082aa899c6b414b65cf0e4ecbf67d 100755 (executable)
@@ -217,9 +217,20 @@ else
 fi
 maintainer="$name <$email>"
 
+# Try to determine distribution
+if [ -n "$KDEB_CHANGELOG_DIST" ]; then
+        distribution=$KDEB_CHANGELOG_DIST
+elif distribution=$(lsb_release -cs 2>/dev/null) && [ -n "$distribution" ]; then
+        : # nothing to do in this case
+else
+        distribution="unstable"
+        echo >&2 "Using default distribution of 'unstable' in the changelog"
+        echo >&2 "Install lsb-release or set \$KDEB_CHANGELOG_DIST explicitly"
+fi
+
 # Generate a simple changelog template
 cat <<EOF > debian/changelog
-linux-upstream ($packageversion) unstable; urgency=low
+linux-upstream ($packageversion) $distribution; urgency=low
 
   * Custom built Linux kernel.
 
 This is a packaged upstream version of the Linux kernel.
 The sources may be found at most Linux ftp sites, including:
 ftp://ftp.kernel.org/pub/linux/kernel
 
-Copyright: 1991 - 2009 Linus Torvalds and others.
+Copyright: 1991 - 2015 Linus Torvalds and others.
 
 The git repository for mainline kernel development is at:
-git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
+git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
index 97130f88838bc2ad385b5ccd69bf0dfc51acae95..e4ea6266386662c2c88445743ff2e107b665230a 100644 (file)
@@ -112,9 +112,9 @@ static inline unsigned int aa_dfa_null_transition(struct aa_dfa *dfa,
        return aa_dfa_next(dfa, start, 0);
 }
 
-static inline bool mediated_filesystem(struct inode *inode)
+static inline bool mediated_filesystem(struct dentry *dentry)
 {
-       return !(inode->i_sb->s_flags & MS_NOUSER);
+       return !(dentry->d_sb->s_flags & MS_NOUSER);
 }
 
 #endif /* __APPARMOR_H */
index 65ca451a764db1a38db4c8a68be1acd015f14d4f..107db88b1d5f9d1d5dda20c0636f229738fec8bd 100644 (file)
@@ -226,7 +226,7 @@ static int common_perm_rm(int op, struct path *dir,
        struct inode *inode = dentry->d_inode;
        struct path_cond cond = { };
 
-       if (!inode || !dir->mnt || !mediated_filesystem(inode))
+       if (!inode || !dir->mnt || !mediated_filesystem(dentry))
                return 0;
 
        cond.uid = inode->i_uid;
@@ -250,7 +250,7 @@ static int common_perm_create(int op, struct path *dir, struct dentry *dentry,
 {
        struct path_cond cond = { current_fsuid(), mode };
 
-       if (!dir->mnt || !mediated_filesystem(dir->dentry->d_inode))
+       if (!dir->mnt || !mediated_filesystem(dir->dentry))
                return 0;
 
        return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
@@ -285,7 +285,7 @@ static int apparmor_path_truncate(struct path *path)
                                  path->dentry->d_inode->i_mode
        };
 
-       if (!path->mnt || !mediated_filesystem(path->dentry->d_inode))
+       if (!path->mnt || !mediated_filesystem(path->dentry))
                return 0;
 
        return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE,
@@ -305,7 +305,7 @@ static int apparmor_path_link(struct dentry *old_dentry, struct path *new_dir,
        struct aa_profile *profile;
        int error = 0;
 
-       if (!mediated_filesystem(old_dentry->d_inode))
+       if (!mediated_filesystem(old_dentry))
                return 0;
 
        profile = aa_current_profile();
@@ -320,7 +320,7 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
        struct aa_profile *profile;
        int error = 0;
 
-       if (!mediated_filesystem(old_dentry->d_inode))
+       if (!mediated_filesystem(old_dentry))
                return 0;
 
        profile = aa_current_profile();
@@ -346,7 +346,7 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
 
 static int apparmor_path_chmod(struct path *path, umode_t mode)
 {
-       if (!mediated_filesystem(path->dentry->d_inode))
+       if (!mediated_filesystem(path->dentry))
                return 0;
 
        return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD);
@@ -358,7 +358,7 @@ static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
                                   path->dentry->d_inode->i_mode
        };
 
-       if (!mediated_filesystem(path->dentry->d_inode))
+       if (!mediated_filesystem(path->dentry))
                return 0;
 
        return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond);
@@ -366,7 +366,7 @@ static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
 
 static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
 {
-       if (!mediated_filesystem(dentry->d_inode))
+       if (!mediated_filesystem(dentry))
                return 0;
 
        return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry,
@@ -379,7 +379,7 @@ static int apparmor_file_open(struct file *file, const struct cred *cred)
        struct aa_profile *profile;
        int error = 0;
 
-       if (!mediated_filesystem(file_inode(file)))
+       if (!mediated_filesystem(file->f_path.dentry))
                return 0;
 
        /* If in exec, permission is handled by bprm hooks.
@@ -432,7 +432,7 @@ static int common_file_perm(int op, struct file *file, u32 mask)
        BUG_ON(!fprofile);
 
        if (!file->f_path.mnt ||
-           !mediated_filesystem(file_inode(file)))
+           !mediated_filesystem(file->f_path.dentry))
                return 0;
 
        profile = __aa_current_profile();
index 35b394a75d762dd6a4e935f3ffe1d5b4566a2885..71e0e3a15b9dc3bbae6b73cd1d8134768f67d2c5 100644 (file)
@@ -114,7 +114,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
         *    security_path hooks as a deleted dentry except without an inode
         *    allocated.
         */
-       if (d_unlinked(path->dentry) && path->dentry->d_inode &&
+       if (d_unlinked(path->dentry) && d_is_positive(path->dentry) &&
            !(flags & PATH_MEDIATE_DELETED)) {
                        error = -ENOENT;
                        goto out;
index 8e7ca62078abe85f988a90b796f265fc2172d526..131a3c49f766444f167f88d19b712ef80ee47a66 100644 (file)
@@ -203,7 +203,7 @@ void securityfs_remove(struct dentry *dentry)
        mutex_lock(&parent->d_inode->i_mutex);
        if (positive(dentry)) {
                if (dentry->d_inode) {
-                       if (S_ISDIR(dentry->d_inode->i_mode))
+                       if (d_is_dir(dentry))
                                simple_rmdir(parent->d_inode, dentry);
                        else
                                simple_unlink(parent->d_inode, dentry);
index b76235ae4786f2c34bc26072fad69bf457b124ce..73c457bf5a4aea01eb56b36613ee69f929428765 100644 (file)
@@ -16,7 +16,7 @@ config INTEGRITY
 if INTEGRITY
 
 config INTEGRITY_SIGNATURE
-       boolean "Digital signature verification using multiple keyrings"
+       bool "Digital signature verification using multiple keyrings"
        depends on KEYS
        default n
        select SIGNATURE
@@ -30,7 +30,7 @@ config INTEGRITY_SIGNATURE
          usually only added from initramfs.
 
 config INTEGRITY_ASYMMETRIC_KEYS
-       boolean "Enable asymmetric keys support"
+       bool "Enable asymmetric keys support"
        depends on INTEGRITY_SIGNATURE
        default n
         select ASYMMETRIC_KEY_TYPE
index df586fa00ef1e9891efae42d629e952c05eaec4d..bf19723cf1178959f82eb9782dd3ee49ea46acaf 100644 (file)
@@ -1,5 +1,5 @@
 config EVM
-       boolean "EVM support"
+       bool "EVM support"
        select KEYS
        select ENCRYPTED_KEYS
        select CRYPTO_HMAC
index 29c39e0b03ed7e3048d5fed858f31d40dc2f8d3d..4d1a54190388df96dddb7ff951c681dc28bab866 100644 (file)
@@ -1799,7 +1799,7 @@ static inline int may_rename(struct inode *old_dir,
 
        old_dsec = old_dir->i_security;
        old_isec = old_dentry->d_inode->i_security;
-       old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
+       old_is_dir = d_is_dir(old_dentry);
        new_dsec = new_dir->i_security;
 
        ad.type = LSM_AUDIT_DATA_DENTRY;
@@ -1822,14 +1822,14 @@ static inline int may_rename(struct inode *old_dir,
 
        ad.u.dentry = new_dentry;
        av = DIR__ADD_NAME | DIR__SEARCH;
-       if (new_dentry->d_inode)
+       if (d_is_positive(new_dentry))
                av |= DIR__REMOVE_NAME;
        rc = avc_has_perm(sid, new_dsec->sid, SECCLASS_DIR, av, &ad);
        if (rc)
                return rc;
-       if (new_dentry->d_inode) {
+       if (d_is_positive(new_dentry)) {
                new_isec = new_dentry->d_inode->i_security;
-               new_is_dir = S_ISDIR(new_dentry->d_inode->i_mode);
+               new_is_dir = d_is_dir(new_dentry);
                rc = avc_has_perm(sid, new_isec->sid,
                                  new_isec->sclass,
                                  (new_is_dir ? DIR__RMDIR : FILE__UNLINK), &ad);
index ed94f6f836e75baf9086f553c78a2d325ea99285..c934311812f1a777093c44a89543dcae924b8568 100644 (file)
@@ -855,7 +855,7 @@ static int smack_inode_link(struct dentry *old_dentry, struct inode *dir,
        rc = smk_curacc(isp, MAY_WRITE, &ad);
        rc = smk_bu_inode(old_dentry->d_inode, MAY_WRITE, rc);
 
-       if (rc == 0 && new_dentry->d_inode != NULL) {
+       if (rc == 0 && d_is_positive(new_dentry)) {
                isp = smk_of_inode(new_dentry->d_inode);
                smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
                rc = smk_curacc(isp, MAY_WRITE, &ad);
@@ -961,7 +961,7 @@ static int smack_inode_rename(struct inode *old_inode,
        rc = smk_curacc(isp, MAY_READWRITE, &ad);
        rc = smk_bu_inode(old_dentry->d_inode, MAY_READWRITE, rc);
 
-       if (rc == 0 && new_dentry->d_inode != NULL) {
+       if (rc == 0 && d_is_positive(new_dentry)) {
                isp = smk_of_inode(new_dentry->d_inode);
                smk_ad_setfield_u_fs_path_dentry(&ad, new_dentry);
                rc = smk_curacc(isp, MAY_READWRITE, &ad);
index 400390790745212764bd99c9178dceea031382ef..c151a1869597f8155a0296f89fafa61cc65f447d 100644 (file)
@@ -905,11 +905,9 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1,
            !tomoyo_get_realpath(&buf2, path2))
                goto out;
        switch (operation) {
-               struct dentry *dentry;
        case TOMOYO_TYPE_RENAME:
        case TOMOYO_TYPE_LINK:
-               dentry = path1->dentry;
-               if (!dentry->d_inode || !S_ISDIR(dentry->d_inode->i_mode))
+               if (!d_is_dir(path1->dentry))
                        break;
                /* fall through */
        case TOMOYO_TYPE_PIVOT_ROOT:
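
For reference, a minimal sketch (not taken from these patches) of the dcache type helpers that the hunks above switch to: d_is_positive() and d_is_dir() live in include/linux/dcache.h and replace direct ->d_inode dereferences, which can be NULL for negative dentries or misleading on union/overlay filesystems. The helper name and logic below are illustrative only, assuming a generic "check a target dentry" use case:

	#include <linux/dcache.h>
	#include <linux/errno.h>

	/* Hypothetical permission helper: classify a target dentry without
	 * touching ->d_inode directly. */
	static int example_check_target(struct dentry *victim)
	{
		if (!d_is_positive(victim))	/* negative dentry: nothing to check */
			return 0;
		if (d_is_dir(victim))		/* directory target */
			return -EISDIR;
		return 0;			/* any other positive dentry */
	}
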
index 9b6470cdcf246f6e89ecf428d84dee0a25ad361e..7ba937399ac783c5cbf3c92f7497227d2feaa3cc 100644 (file)
@@ -269,6 +269,9 @@ do_control(struct snd_midi_op *ops, void *drv, struct snd_midi_channel_set *chse
 {
        int  i;
 
+       if (control >= ARRAY_SIZE(chan->control))
+               return;
+
        /* Switches */
        if ((control >=64 && control <=69) || (control >= 80 && control <= 83)) {
                /* These are all switches; either off or on so set to 0 or 127 */
index 227990bc02e38cb459921ea5f08d7945eb540b51..375e94f4cf5265ba19378f96998658a5e4fed91c 100644 (file)
@@ -329,8 +329,8 @@ static int hda_tegra_init_chip(struct azx *chip, struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        hda->regs = devm_ioremap_resource(dev, res);
-       if (IS_ERR(chip->remap_addr))
-               return PTR_ERR(chip->remap_addr);
+       if (IS_ERR(hda->regs))
+               return PTR_ERR(hda->regs);
 
        chip->remap_addr = hda->regs + HDA_BAR0;
        chip->addr = res->start + HDA_BAR0;
index ddb93083a2af1ea15ffe2374a81193481a2f0da1..b2b24a8b3dac8c49d2bb55c3142eecd967e73608 100644 (file)
@@ -4937,6 +4937,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
        SND_PCI_QUIRK(0x103c, 0x225f, "HP", ALC280_FIXUP_HP_GPIO2_MIC_HOTKEY),
        /* ALC282 */
+       SND_PCI_QUIRK(0x103c, 0x21f9, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2210, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2214, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
        SND_PCI_QUIRK(0x103c, 0x2236, "HP", ALC269_FIXUP_HP_LINE1_MIC1_LED),
index 2c363fdca9fd00e0b82b99d029a4bd53a9728467..ca67f896d11757bc05be43804d367175b282dad0 100644 (file)
@@ -6082,6 +6082,9 @@ static int snd_hdspm_playback_open(struct snd_pcm_substream *substream)
                snd_pcm_hw_constraint_minmax(runtime,
                                             SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                             64, 8192);
+               snd_pcm_hw_constraint_minmax(runtime,
+                                            SNDRV_PCM_HW_PARAM_PERIODS,
+                                            2, 2);
                break;
        }
 
@@ -6156,6 +6159,9 @@ static int snd_hdspm_capture_open(struct snd_pcm_substream *substream)
                snd_pcm_hw_constraint_minmax(runtime,
                                             SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                                             64, 8192);
+               snd_pcm_hw_constraint_minmax(runtime,
+                                            SNDRV_PCM_HW_PARAM_PERIODS,
+                                            2, 2);
                break;
        }
 
index d6fa9d5514e1923997066ec1e14c7dafac281387..7e21e8f85885e436cb896a806dde544d3cd8ec4d 100644 (file)
@@ -91,7 +91,8 @@ static const struct snd_pcm_hardware hsw_pcm_hardware = {
                                  SNDRV_PCM_INFO_INTERLEAVED |
                                  SNDRV_PCM_INFO_PAUSE |
                                  SNDRV_PCM_INFO_RESUME |
-                                 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP,
+                                 SNDRV_PCM_INFO_NO_PERIOD_WAKEUP |
+                                 SNDRV_PCM_INFO_DRAIN_TRIGGER,
        .formats                = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE |
                                  SNDRV_PCM_FMTBIT_S32_LE,
        .period_bytes_min       = PAGE_SIZE,
index 03fed6611d9e83d489e2ab66d94320c84eed4c18..2ed260b10f6dc02cd129550ba1067c878034cb07 100644 (file)
@@ -303,6 +303,11 @@ static int set_sample_rate_v1(struct snd_usb_audio *chip, int iface,
                return err;
        }
 
+       /* Don't check the sample rate for devices which we know don't
+        * support reading */
+       if (snd_usb_get_sample_rate_quirk(chip))
+               return 0;
+
        if ((err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC_GET_CUR,
                                   USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_IN,
                                   UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep,
index 99b63a7902f302f4a432425d7a6c3d23dbd571f7..81b7da8e56d39e352a5b629bbdf73aa460889b4d 100644 (file)
@@ -302,14 +302,17 @@ static void line6_data_received(struct urb *urb)
 /*
        Read data from device.
 */
-int line6_read_data(struct usb_line6 *line6, int address, void *data,
-                   size_t datalen)
+int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
+                   unsigned datalen)
 {
        struct usb_device *usbdev = line6->usbdev;
        int ret;
        unsigned char len;
        unsigned count;
 
+       if (address > 0xffff || datalen > 0xff)
+               return -EINVAL;
+
        /* query the serial number: */
        ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
                              USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
@@ -370,14 +373,17 @@ EXPORT_SYMBOL_GPL(line6_read_data);
 /*
        Write data to device.
 */
-int line6_write_data(struct usb_line6 *line6, int address, void *data,
-                    size_t datalen)
+int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
+                    unsigned datalen)
 {
        struct usb_device *usbdev = line6->usbdev;
        int ret;
        unsigned char status;
        int count;
 
+       if (address > 0xffff || datalen > 0xffff)
+               return -EINVAL;
+
        ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
                              USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
                              0x0022, address, data, datalen,
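
As context for the line6 hunks above, a hypothetical helper sketching the same validation pattern: caller-supplied address and length values are range-checked before being narrowed into the 16-bit and 8-bit fields of a USB vendor control transfer. The request number, field layout and timeout below are placeholders for illustration, not the driver's actual protocol:

	#include <linux/usb.h>
	#include <linux/errno.h>

	static int example_vendor_read(struct usb_device *udev, unsigned int address,
				       void *data, unsigned int datalen)
	{
		/* Reject values that cannot be represented in the control request:
		 * the address travels in the 16-bit wIndex field and the length in
		 * a single byte. */
		if (address > 0xffff || datalen > 0xff)
			return -EINVAL;

		return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				       0x67,			/* placeholder request */
				       USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
				       datalen, address,	/* wValue, wIndex */
				       data, datalen, 1000);	/* 1000 ms timeout */
	}
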
index 5d20294d64f43be80334bb0a902deb7ff7e63da0..7da643e79e3b50426c3cbaf4d46184d7283f3848 100644 (file)
@@ -147,8 +147,8 @@ struct usb_line6 {
 
 extern char *line6_alloc_sysex_buffer(struct usb_line6 *line6, int code1,
                                      int code2, int size);
-extern int line6_read_data(struct usb_line6 *line6, int address, void *data,
-                          size_t datalen);
+extern int line6_read_data(struct usb_line6 *line6, unsigned address,
+                          void *data, unsigned datalen);
 extern int line6_read_serial_number(struct usb_line6 *line6,
                                    u32 *serial_number);
 extern int line6_send_raw_message_async(struct usb_line6 *line6,
@@ -161,8 +161,8 @@ extern void line6_start_timer(struct timer_list *timer, unsigned long msecs,
                              void (*function)(unsigned long),
                              unsigned long data);
 extern int line6_version_request_async(struct usb_line6 *line6);
-extern int line6_write_data(struct usb_line6 *line6, int address, void *data,
-                           size_t datalen);
+extern int line6_write_data(struct usb_line6 *line6, unsigned address,
+                           void *data, unsigned datalen);
 
 int line6_probe(struct usb_interface *interface,
                const struct usb_device_id *id,
index a7398412310bd53e00b84c2aa6c567ac451a4e7a..753a47de8459b7a0b505e72d2f660793d9ede885 100644 (file)
@@ -1111,6 +1111,11 @@ void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
        }
 }
 
+bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
+{
+       /* MS Lifecam HD-5000 doesn't support reading the sample rate. */
+       return chip->usb_id == USB_ID(0x045E, 0x076D);
+}
 
 /* Marantz/Denon USB DACs need a vendor cmd to switch
  * between PCM and native DSD mode
@@ -1122,6 +1127,7 @@ int snd_usb_select_mode_quirk(struct snd_usb_substream *subs,
        int err;
 
        switch (subs->stream->chip->usb_id) {
+       case USB_ID(0x154e, 0x1003): /* Denon DA-300USB */
        case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
        case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
 
@@ -1201,6 +1207,7 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
            (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) {
 
                switch (le16_to_cpu(dev->descriptor.idProduct)) {
+               case 0x1003: /* Denon DA300-USB */
                case 0x3005: /* Marantz HD-DAC1 */
                case 0x3006: /* Marantz SA-14S1 */
                        mdelay(20);
@@ -1262,6 +1269,7 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
 
        /* Denon/Marantz devices with USB DAC functionality */
        switch (chip->usb_id) {
+       case USB_ID(0x154e, 0x1003): /* Denon DA300-USB */
        case USB_ID(0x154e, 0x3005): /* Marantz HD-DAC1 */
        case USB_ID(0x154e, 0x3006): /* Marantz SA-14S1 */
                if (fp->altsetting == 2)
index 1b862386577d036d262a2d760f4f3551337ac8be..2cd71ed1201f93ea8e6b54e1d487a84751e09885 100644 (file)
@@ -21,6 +21,8 @@ int snd_usb_apply_boot_quirk(struct usb_device *dev,
 void snd_usb_set_format_quirk(struct snd_usb_substream *subs,
                              struct audioformat *fmt);
 
+bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip);
+
 int snd_usb_is_big_endian_format(struct snd_usb_audio *chip,
                                 struct audioformat *fp);