Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
authorLinus Torvalds <torvalds@linux-foundation.org>
Tue, 30 Dec 2014 18:45:47 +0000 (10:45 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Tue, 30 Dec 2014 18:45:47 +0000 (10:45 -0800)
Pull networking fixes from David Miller:

 1) Fix double SKB free in bluetooth 6lowpan layer, from Jukka Rissanen.

 2) Fix receive checksum handling in enic driver, from Govindarajulu
    Varadarajan.

 3) Fix NAPI poll list corruption in virtio_net and caif_virtio, from
    Herbert Xu.  Also, add code to detect drivers that have this mistake
    in the future.

 4) Fix doorbell endianness handling in mlx4 driver, from Amir Vadai.

 5) Don't clobber IP6CB() before xfrm6_policy_check() is called in TCP
    input path, from Nicolas Dichtel.

 6) Fix MPLS action validation in openvswitch, from Pravin B Shelar.

 7) Fix double SKB free in vxlan driver, also from Pravin.

 8) When we scrub a packet, which happens when we are switching the
    context of the packet (namespace, etc.), we should reset the
    secmark.  From Thomas Graf.

 9) ->ndo_gso_check() needs to do more than return true/false, it also
    has to allow the driver to clear netdev feature bits in order for
    the caller to be able to proceed properly.  From Jesse Gross.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (62 commits)
  genetlink: A genl_bind() to an out-of-range multicast group should not WARN().
  netlink/genetlink: pass network namespace to bind/unbind
  ne2k-pci: Add pci_disable_device in error handling
  bonding: change error message to debug message in __bond_release_one()
  genetlink: pass multicast bind/unbind to families
  netlink: call unbind when releasing socket
  netlink: update listeners directly when removing socket
  genetlink: pass only network namespace to genl_has_listeners()
  netlink: rename netlink_unbind() to netlink_undo_bind()
  net: Generalize ndo_gso_check to ndo_features_check
  net: incorrect use of init_completion fixup
  neigh: remove next ptr from struct neigh_table
  net: xilinx: Remove unnecessary temac_property in the driver
  net: phy: micrel: use generic config_init for KSZ8021/KSZ8031
  net/core: Handle csum for CHECKSUM_COMPLETE VXLAN forwarding
  openvswitch: fix odd_ptr_err.cocci warnings
  Bluetooth: Fix accepting connections when not using mgmt
  Bluetooth: Fix controller configuration with HCI_QUIRK_INVALID_BDADDR
  brcmfmac: Do not crash if platform data is not populated
  ipw2200: select CFG80211_WEXT
  ...

548 files changed:
.gitignore
CREDITS
Documentation/clk.txt
Documentation/devicetree/bindings/clock/exynos4415-clock.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/exynos7-clock.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/marvell,mmp2.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/marvell,pxa168.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/marvell,pxa910.txt [new file with mode: 0644]
Documentation/devicetree/bindings/clock/renesas,cpg-div6-clocks.txt
Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
Documentation/devicetree/bindings/clock/sunxi.txt
Documentation/devicetree/bindings/i2c/i2c-opal.txt [new file with mode: 0644]
Documentation/x86/intel_mpx.txt
MAINTAINERS
Makefile
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/mmp2-brownstone.dts
arch/arm/boot/dts/mmp2.dtsi
arch/arm/boot/dts/pxa168-aspenite.dts
arch/arm/boot/dts/pxa168.dtsi
arch/arm/boot/dts/pxa910-dkb.dts
arch/arm/boot/dts/pxa910.dtsi
arch/arm/boot/dts/sun4i-a10.dtsi
arch/arm/boot/dts/sun5i-a10s.dtsi
arch/arm/boot/dts/sun5i-a13.dtsi
arch/arm/boot/dts/sun6i-a31.dtsi
arch/arm/boot/dts/sun7i-a20.dtsi
arch/arm/boot/dts/sun8i-a23.dtsi
arch/arm/configs/ape6evm_defconfig
arch/arm/configs/armadillo800eva_defconfig
arch/arm/configs/bcm_defconfig
arch/arm/configs/bockw_defconfig
arch/arm/configs/davinci_all_defconfig
arch/arm/configs/exynos_defconfig
arch/arm/configs/ezx_defconfig
arch/arm/configs/hisi_defconfig
arch/arm/configs/imote2_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/keystone_defconfig
arch/arm/configs/kzm9g_defconfig
arch/arm/configs/lager_defconfig
arch/arm/configs/mackerel_defconfig
arch/arm/configs/marzen_defconfig
arch/arm/configs/omap1_defconfig
arch/arm/configs/prima2_defconfig
arch/arm/configs/sama5_defconfig
arch/arm/configs/shmobile_defconfig
arch/arm/configs/sunxi_defconfig
arch/arm/configs/tegra_defconfig
arch/arm/configs/u8500_defconfig
arch/arm/configs/vt8500_v6_v7_defconfig
arch/arm/include/asm/spinlock.h
arch/arm/mach-mmp/Kconfig
arch/arm/mach-mmp/mmp-dt.c
arch/arm/mach-mmp/mmp2-dt.c
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/cclock3xxx_data.c
arch/arm/mach-omap2/dpll3xxx.c
arch/arm/mach-omap2/dpll44xx.c
arch/arm64/configs/defconfig
arch/arm64/include/asm/dma-mapping.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/spinlock.h
arch/arm64/kernel/suspend.c
arch/cris/arch-v10/lib/usercopy.c
arch/cris/arch-v32/drivers/Kconfig
arch/cris/arch-v32/drivers/Makefile
arch/cris/arch-v32/drivers/i2c.h
arch/cris/arch-v32/drivers/sync_serial.c
arch/cris/arch-v32/kernel/debugport.c
arch/cris/arch-v32/kernel/time.c
arch/cris/arch-v32/lib/usercopy.c
arch/cris/arch-v32/mach-fs/pinmux.c
arch/cris/include/arch-v32/mach-fs/mach/pinmux.h
arch/cris/include/asm/Kbuild
arch/cris/include/uapi/asm/Kbuild
arch/cris/kernel/crisksyms.c
arch/cris/kernel/traps.c
arch/cris/mm/init.c
arch/cris/mm/ioremap.c
arch/hexagon/include/asm/cache.h
arch/hexagon/include/asm/cacheflush.h
arch/hexagon/include/asm/io.h
arch/hexagon/kernel/setup.c
arch/hexagon/kernel/traps.c
arch/hexagon/kernel/vmlinux.lds.S
arch/hexagon/mm/cache.c
arch/hexagon/mm/ioremap.c
arch/ia64/include/asm/percpu.h
arch/mips/alchemy/common/clock.c
arch/mips/configs/db1xxx_defconfig
arch/mips/configs/lemote2f_defconfig
arch/mips/configs/loongson3_defconfig
arch/mips/configs/nlm_xlp_defconfig
arch/mips/configs/nlm_xlr_defconfig
arch/mips/mm/gup.c
arch/parisc/include/asm/ldcw.h
arch/powerpc/configs/ps3_defconfig
arch/powerpc/include/asm/cpuidle.h [new file with mode: 0644]
arch/powerpc/include/asm/opal.h
arch/powerpc/include/asm/paca.h
arch/powerpc/include/asm/ppc-opcode.h
arch/powerpc/include/asm/processor.h
arch/powerpc/include/asm/reg.h
arch/powerpc/include/asm/syscall.h
arch/powerpc/include/asm/uaccess.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/exceptions-64s.S
arch/powerpc/kernel/idle_power7.S
arch/powerpc/kernel/smp.c
arch/powerpc/perf/hv-24x7.c
arch/powerpc/platforms/powernv/opal-wrappers.S
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/powernv.h
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/platforms/powernv/subcore.c
arch/powerpc/platforms/powernv/subcore.h
arch/s390/kvm/gaccess.c
arch/sh/Kconfig
arch/sh/configs/apsh4ad0a_defconfig
arch/sh/configs/sdk7786_defconfig
arch/x86/Kconfig
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/io_apic.h
arch/x86/include/asm/irq_vectors.h
arch/x86/include/asm/pci.h
arch/x86/include/asm/pci_x86.h
arch/x86/include/asm/spinlock.h
arch/x86/include/uapi/asm/ldt.h
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/apic/Makefile
arch/x86/kernel/apic/apic.c
arch/x86/kernel/apic/htirq.c [new file with mode: 0644]
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/msi.c [new file with mode: 0644]
arch/x86/kernel/apic/vector.c [new file with mode: 0644]
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/crash.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/irqinit.c
arch/x86/kernel/machine_kexec_32.c
arch/x86/kernel/machine_kexec_64.c
arch/x86/kernel/reboot.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/tls.c
arch/x86/kernel/traps.c
arch/x86/kvm/mmu.c
arch/x86/kvm/vmx.c
arch/x86/lguest/boot.c
arch/x86/mm/gup.c
arch/x86/pci/intel_mid_pci.c
arch/x86/pci/irq.c
arch/x86/platform/uv/uv_irq.c
drivers/acpi/pci_irq.c
drivers/acpi/processor_core.c
drivers/acpi/processor_idle.c
drivers/acpi/resource.c
drivers/acpi/video.c
drivers/base/power/domain.c
drivers/base/power/opp.c
drivers/char/agp/ali-agp.c
drivers/char/agp/amd64-agp.c
drivers/char/agp/ati-agp.c
drivers/char/agp/backend.c
drivers/char/agp/intel-agp.c
drivers/char/agp/intel-gtt.c
drivers/char/agp/nvidia-agp.c
drivers/char/agp/via-agp.c
drivers/char/ipmi/ipmi_msghandler.c
drivers/char/ipmi/ipmi_ssif.c
drivers/clk/at91/clk-programmable.c
drivers/clk/bcm/clk-kona.c
drivers/clk/clk-composite.c
drivers/clk/clk-mux.c
drivers/clk/clk-s2mps11.c
drivers/clk/clk.c
drivers/clk/hisilicon/clk-hi3620.c
drivers/clk/mmp/Makefile
drivers/clk/mmp/clk-frac.c
drivers/clk/mmp/clk-gate.c [new file with mode: 0644]
drivers/clk/mmp/clk-mix.c [new file with mode: 0644]
drivers/clk/mmp/clk-mmp2.c
drivers/clk/mmp/clk-of-mmp2.c [new file with mode: 0644]
drivers/clk/mmp/clk-of-pxa168.c [new file with mode: 0644]
drivers/clk/mmp/clk-of-pxa910.c [new file with mode: 0644]
drivers/clk/mmp/clk-pxa168.c
drivers/clk/mmp/clk-pxa910.c
drivers/clk/mmp/clk.c [new file with mode: 0644]
drivers/clk/mmp/clk.h
drivers/clk/mmp/reset.c [new file with mode: 0644]
drivers/clk/mmp/reset.h [new file with mode: 0644]
drivers/clk/pxa/Makefile
drivers/clk/pxa/clk-pxa.c
drivers/clk/pxa/clk-pxa.h
drivers/clk/pxa/clk-pxa25x.c [new file with mode: 0644]
drivers/clk/pxa/clk-pxa27x.c
drivers/clk/qcom/clk-pll.c
drivers/clk/qcom/clk-rcg.c
drivers/clk/qcom/clk-rcg2.c
drivers/clk/rockchip/Makefile
drivers/clk/rockchip/clk-mmc-phase.c [new file with mode: 0644]
drivers/clk/rockchip/clk-pll.c
drivers/clk/rockchip/clk-rk3188.c
drivers/clk/rockchip/clk-rk3288.c
drivers/clk/rockchip/clk.c
drivers/clk/rockchip/clk.h
drivers/clk/samsung/Makefile
drivers/clk/samsung/clk-exynos-audss.c
drivers/clk/samsung/clk-exynos4.c
drivers/clk/samsung/clk-exynos4415.c [new file with mode: 0644]
drivers/clk/samsung/clk-exynos5260.c
drivers/clk/samsung/clk-exynos7.c [new file with mode: 0644]
drivers/clk/samsung/clk-pll.c
drivers/clk/samsung/clk-pll.h
drivers/clk/samsung/clk.c
drivers/clk/samsung/clk.h
drivers/clk/shmobile/clk-div6.c
drivers/clk/sunxi/Makefile
drivers/clk/sunxi/clk-a20-gmac.c
drivers/clk/sunxi/clk-factors.c
drivers/clk/sunxi/clk-factors.h
drivers/clk/sunxi/clk-mod0.c
drivers/clk/sunxi/clk-sun6i-ar100.c
drivers/clk/sunxi/clk-sun8i-mbus.c
drivers/clk/sunxi/clk-sun9i-core.c [new file with mode: 0644]
drivers/clk/sunxi/clk-sunxi.c
drivers/cpufreq/cpufreq-dt.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/longhaul.c
drivers/cpufreq/powernow-k6.c
drivers/cpufreq/powernow-k7.c
drivers/cpufreq/speedstep-ich.c
drivers/cpuidle/cpuidle-powernv.c
drivers/cpuidle/governors/ladder.c
drivers/cpuidle/governors/menu.c
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
drivers/gpu/drm/amd/amdkfd/kfd_process.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/drm_atomic_helper.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/hdmi/hdmi_connector.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp_kms.c
drivers/gpu/drm/msm/mdp/mdp_kms.h
drivers/gpu/drm/msm/msm_atomic.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_drv.h
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_ttm.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_kfd.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/tegra/gem.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/Makefile
drivers/i2c/busses/i2c-mv64xxx.c
drivers/i2c/busses/i2c-opal.c [new file with mode: 0644]
drivers/i2c/busses/i2c-sh_mobile.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/infiniband/ulp/srp/ib_srp.c
drivers/infiniband/ulp/srpt/ib_srpt.c
drivers/iommu/amd_iommu.c
drivers/iommu/intel_irq_remapping.c
drivers/iommu/irq_remapping.c
drivers/macintosh/Kconfig
drivers/macintosh/Makefile
drivers/macintosh/therm_pm72.c [deleted file]
drivers/macintosh/therm_pm72.h [deleted file]
drivers/md/dm-thin.c
drivers/md/dm.c
drivers/misc/cxl/context.c
drivers/misc/cxl/cxl.h
drivers/misc/cxl/file.c
drivers/misc/cxl/native.c
drivers/misc/cxl/pci.c
drivers/misc/cxl/sysfs.c
drivers/mmc/core/mmc.c
drivers/pci/Kconfig
drivers/pci/Makefile
drivers/pci/hotplug/ibmphp_core.c
drivers/pci/ioapic.c [deleted file]
drivers/powercap/intel_rapl.c
drivers/regulator/s2mps11.c
drivers/scsi/53c700.c
drivers/scsi/Kconfig
drivers/scsi/advansys.c
drivers/scsi/aic94xx/aic94xx_init.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/csiostor/csio_scsi.c
drivers/scsi/esas2r/esas2r_flash.c
drivers/scsi/esas2r/esas2r_main.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fnic/fnic_main.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/isci/init.c
drivers/scsi/libsas/sas_scsi_host.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/mpt2sas/mpt2sas_transport.c
drivers/scsi/mpt3sas/mpt3sas_scsih.c
drivers/scsi/mpt3sas/mpt3sas_transport.c
drivers/scsi/mvsas/mv_init.c
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/pmcraid.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/scsi.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_devinfo.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_spi.c
drivers/scsi/storvsc_drv.c
drivers/spi/spi-img-spfi.c
drivers/spi/spi-meson-spifc.c
drivers/spi/spi-sh-msiof.c
drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
drivers/staging/lustre/lustre/llite/dir.c
drivers/staging/lustre/lustre/llite/llite_lib.c
drivers/staging/lustre/lustre/llite/namei.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/iscsi/iscsi_target_transport.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/loopback/tcm_loop.c
drivers/target/loopback/tcm_loop.h
drivers/target/sbp/sbp_target.c
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_hba.c
drivers/target/target_core_iblock.c
drivers/target/target_core_internal.h
drivers/target/target_core_pr.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_rd.c
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/thermal/cpu_cooling.c
drivers/thermal/db8500_cpufreq_cooling.c
drivers/thermal/imx_thermal.c
drivers/thermal/int340x_thermal/Makefile
drivers/thermal/int340x_thermal/acpi_thermal_rel.c
drivers/thermal/int340x_thermal/int3400_thermal.c
drivers/thermal/int340x_thermal/int3402_thermal.c
drivers/thermal/int340x_thermal/int3403_thermal.c
drivers/thermal/int340x_thermal/processor_thermal_device.c [new file with mode: 0644]
drivers/thermal/intel_powerclamp.c
drivers/thermal/rockchip_thermal.c
drivers/thermal/samsung/Kconfig
drivers/thermal/samsung/exynos_thermal_common.c
drivers/thermal/samsung/exynos_tmu.c
drivers/thermal/thermal_core.c
drivers/thermal/ti-soc-thermal/ti-thermal-common.c
drivers/tty/serial/8250/8250_omap.c
drivers/usb/gadget/legacy/tcm_usb_gadget.c
drivers/usb/host/isp1760-hcd.c
drivers/usb/host/oxu210hp-hcd.c
drivers/xen/xen-scsiback.c
fs/binfmt_misc.c
fs/btrfs/ctree.h
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/free-space-cache.c
fs/btrfs/volumes.c
fs/cifs/cifsglob.h
fs/cifs/netmisc.c
fs/cifs/readdir.c
fs/cifs/smb2misc.c
fs/cifs/smb2ops.c
fs/cifs/smb2pdu.h
fs/cifs/smb2transport.c
fs/ecryptfs/crypto.c
fs/ecryptfs/file.c
fs/ecryptfs/keystore.c
fs/ecryptfs/main.c
fs/ext4/move_extent.c
fs/isofs/rock.c
fs/kernfs/file.c
fs/proc/stat.c
fs/proc_namespace.c
fs/udf/dir.c
fs/udf/inode.c
fs/udf/namei.c
fs/udf/symlink.c
fs/udf/udfdecl.h
fs/udf/unicode.c
include/drm/drmP.h
include/drm/drm_gem.h
include/dt-bindings/clock/exynos4415.h [new file with mode: 0644]
include/dt-bindings/clock/exynos7-clk.h [new file with mode: 0644]
include/dt-bindings/clock/marvell,mmp2.h [new file with mode: 0644]
include/dt-bindings/clock/marvell,pxa168.h [new file with mode: 0644]
include/dt-bindings/clock/marvell,pxa910.h [new file with mode: 0644]
include/dt-bindings/clock/rk3288-cru.h
include/dt-bindings/thermal/thermal.h
include/linux/acpi.h
include/linux/audit.h
include/linux/clk-provider.h
include/linux/clk/ti.h
include/linux/compiler.h
include/linux/cpu_cooling.h
include/linux/cpuidle.h
include/linux/devfreq.h
include/linux/kernel_stat.h
include/linux/migrate.h
include/linux/mm.h
include/linux/pagemap.h
include/linux/pci.h
include/linux/pm_domain.h
include/linux/thermal.h
include/linux/uio.h
include/scsi/libsas.h
include/scsi/scsi_host.h
include/scsi/scsi_tcq.h
include/target/target_core_backend.h
include/target/target_core_backend_configfs.h [new file with mode: 0644]
include/target/target_core_base.h
include/trace/events/target.h
include/uapi/linux/audit.h
include/uapi/linux/target_core_user.h
init/do_mounts.c
kernel/audit.c
kernel/auditfilter.c
kernel/auditsc.c
kernel/events/core.c
kernel/irq/internals.h
kernel/irq/irqdesc.c
kernel/irq/proc.c
kernel/power/Kconfig
kernel/time/tick-sched.c
mm/filemap.c
mm/gup.c
mm/memory.c
mm/mempolicy.c
mm/migrate.c
mm/rmap.c
mm/shmem.c
net/socket.c
scripts/Kbuild.include
scripts/Makefile.clean
scripts/Makefile.headersinst
scripts/coccinelle/misc/bugon.cocci
scripts/headers.sh
scripts/kconfig/mconf.c
scripts/kconfig/menu.c
scripts/package/mkspec
sound/firewire/oxfw/oxfw-pcm.c
sound/firewire/oxfw/oxfw-proc.c
sound/firewire/oxfw/oxfw-stream.c
sound/firewire/oxfw/oxfw.c
sound/pci/asihpi/hpi_internal.h
sound/pci/asihpi/hpi_version.h
sound/pci/asihpi/hpidspcd.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_generic.h
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_priv.h
sound/pci/hda/hda_sysfs.c
sound/pci/hda/patch_analog.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_via.c
sound/soc/atmel/atmel_ssc_dai.c
sound/soc/codecs/Kconfig
sound/soc/codecs/pcm512x-i2c.c
sound/soc/codecs/rt5645.c
sound/soc/intel/sst-haswell-pcm.c
sound/soc/intel/sst/sst_acpi.c
sound/soc/samsung/i2s.c
sound/usb/mixer_maps.c
sound/usb/mixer_scarlett.c
sound/usb/quirks.c
tools/include/asm-generic/bitops.h [new file with mode: 0644]
tools/include/asm-generic/bitops/__ffs.h [new file with mode: 0644]
tools/include/asm-generic/bitops/__fls.h [new file with mode: 0644]
tools/include/asm-generic/bitops/atomic.h [new file with mode: 0644]
tools/include/asm-generic/bitops/find.h [new file with mode: 0644]
tools/include/asm-generic/bitops/fls.h [new file with mode: 0644]
tools/include/asm-generic/bitops/fls64.h [new file with mode: 0644]
tools/include/linux/bitops.h [new file with mode: 0644]
tools/include/linux/log2.h [new file with mode: 0644]
tools/lib/api/fs/fs.c
tools/lib/api/fs/fs.h
tools/lib/util/find_next_bit.c [new file with mode: 0644]
tools/perf/Documentation/perf.txt
tools/perf/MANIFEST
tools/perf/Makefile.perf
tools/perf/bench/mem-memcpy.c
tools/perf/bench/mem-memset.c [deleted file]
tools/perf/builtin-buildid-cache.c
tools/perf/builtin-kvm.c
tools/perf/builtin-trace.c
tools/perf/perf.c
tools/perf/tests/attr/base-record
tools/perf/tests/attr/base-stat
tools/perf/ui/browsers/hists.c
tools/perf/ui/hist.c
tools/perf/util/build-id.c
tools/perf/util/callchain.c
tools/perf/util/config.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/include/linux/bitops.h [deleted file]
tools/perf/util/machine.c
tools/perf/util/record.c
tools/perf/util/srcline.c
tools/perf/util/symbol-minimal.c
tools/perf/util/util.c
tools/perf/util/util.h
tools/power/cpupower/utils/cpupower.c
tools/power/cpupower/utils/helpers/sysfs.c
tools/testing/selftests/exec/execveat.c
tools/thermal/tmon/sysfs.c
virt/kvm/kvm_main.c

index e213b27f3921a88768201638d6d358f0f0c80419..ce57b79670a5cfb07472c8567c6f0920417acb4a 100644 (file)
@@ -96,3 +96,6 @@ x509.genkey
 
 # Kconfig presets
 all.config
+
+# Kdevelop4
+*.kdev4
diff --git a/CREDITS b/CREDITS
index c56d8aa10131d8443f7c5b717ae2b13b2a1d376b..96935df0b6fe5d10cf1558c8ed6a15af89fb0cac 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -1734,14 +1734,14 @@ S: Chapel Hill, North Carolina 27514-4818
 S: USA
 
 N: Dave Jones
-E: davej@redhat.com
+E: davej@codemonkey.org.uk
 W: http://www.codemonkey.org.uk
 D: Assorted VIA x86 support.
 D: 2.5 AGPGART overhaul.
 D: CPUFREQ maintenance.
-D: Fedora kernel maintenance.
+D: Fedora kernel maintenance (2003-2014).
+D: 'Trinity' and similar fuzz testing work.
 D: Misc/Other.
-S: 314 Littleton Rd, Westford, MA 01886, USA
 
 N: Martin Josfsson
 E: gandalf@wlug.westbo.se
index 1fee72f4d3319715f9efc2e258458cd807f3a537..4ff84623d5e16eb42c17f90eb1851eea19b9d82f 100644 (file)
@@ -74,7 +74,7 @@ the operations defined in clk.h:
                long            (*determine_rate)(struct clk_hw *hw,
                                                unsigned long rate,
                                                unsigned long *best_parent_rate,
-                                               struct clk **best_parent_clk);
+                                               struct clk_hw **best_parent_clk);
                int             (*set_parent)(struct clk_hw *hw, u8 index);
                u8              (*get_parent)(struct clk_hw *hw);
                int             (*set_rate)(struct clk_hw *hw,
diff --git a/Documentation/devicetree/bindings/clock/exynos4415-clock.txt b/Documentation/devicetree/bindings/clock/exynos4415-clock.txt
new file mode 100644 (file)
index 0000000..847d98b
--- /dev/null
@@ -0,0 +1,38 @@
+* Samsung Exynos4415 Clock Controller
+
+The Exynos4415 clock controller generates and supplies clock to various
+consumer devices within the Exynos4415 SoC.
+
+Required properties:
+
+- compatible: should be one of the following:
+  - "samsung,exynos4415-cmu" - for the main system clocks controller
+    (CMU_LEFTBUS, CMU_RIGHTBUS, CMU_TOP, CMU_CPU clock domains).
+  - "samsung,exynos4415-cmu-dmc" - for the Exynos4415 SoC DRAM Memory
+    Controller (DMC) domain clock controller.
+
+- reg: physical base address of the controller and length of memory mapped
+  region.
+
+- #clock-cells: should be 1.
+
+Each clock is assigned an identifier and client nodes can use this identifier
+to specify the clock which they consume.
+
+All available clocks are defined as preprocessor macros in
+dt-bindings/clock/exynos4415.h header and can be used in device
+tree sources.
+
+Example 1: An example of a clock controller node is listed below.
+
+       cmu: clock-controller@10030000 {
+               compatible = "samsung,exynos4415-cmu";
+               reg = <0x10030000 0x18000>;
+               #clock-cells = <1>;
+       };
+
+       cmu-dmc: clock-controller@105C0000 {
+               compatible = "samsung,exynos4415-cmu-dmc";
+               reg = <0x105C0000 0x3000>;
+               #clock-cells = <1>;
+       };
diff --git a/Documentation/devicetree/bindings/clock/exynos7-clock.txt b/Documentation/devicetree/bindings/clock/exynos7-clock.txt
new file mode 100644 (file)
index 0000000..6d3d5f8
--- /dev/null
@@ -0,0 +1,93 @@
+* Samsung Exynos7 Clock Controller
+
+Exynos7 clock controller has various blocks which are instantiated
+independently from the device-tree. These clock controllers
+generate and supply clocks to various hardware blocks within
+the SoC.
+
+Each clock is assigned an identifier and client nodes can use
+this identifier to specify the clock which they consume. All
+available clocks are defined as preprocessor macros in
+dt-bindings/clock/exynos7-clk.h header and can be used in
+device tree sources.
+
+External clocks:
+
+There are several clocks that are generated outside the SoC. It
+is expected that they are defined using standard clock bindings
+with following clock-output-names:
+
+ - "fin_pll" - PLL input clock from XXTI
+
+Required Properties for Clock Controller:
+
+ - compatible: clock controllers will use one of the following
+       compatible strings to indicate the clock controller
+       functionality.
+
+       - "samsung,exynos7-clock-topc"
+       - "samsung,exynos7-clock-top0"
+       - "samsung,exynos7-clock-top1"
+       - "samsung,exynos7-clock-ccore"
+       - "samsung,exynos7-clock-peric0"
+       - "samsung,exynos7-clock-peric1"
+       - "samsung,exynos7-clock-peris"
+       - "samsung,exynos7-clock-fsys0"
+       - "samsung,exynos7-clock-fsys1"
+
+ - reg: physical base address of the controller and the length of
+       memory mapped region.
+
+ - #clock-cells: should be 1.
+
+ - clocks: list of clock identifiers which are fed as the input to
+       the given clock controller. Please refer the next section to
+       find the input clocks for a given controller.
+
+- clock-names: list of names of clocks which are fed as the input
+       to the given clock controller.
+
+Input clocks for top0 clock controller:
+       - fin_pll
+       - dout_sclk_bus0_pll
+       - dout_sclk_bus1_pll
+       - dout_sclk_cc_pll
+       - dout_sclk_mfc_pll
+
+Input clocks for top1 clock controller:
+       - fin_pll
+       - dout_sclk_bus0_pll
+       - dout_sclk_bus1_pll
+       - dout_sclk_cc_pll
+       - dout_sclk_mfc_pll
+
+Input clocks for ccore clock controller:
+       - fin_pll
+       - dout_aclk_ccore_133
+
+Input clocks for peric0 clock controller:
+       - fin_pll
+       - dout_aclk_peric0_66
+       - sclk_uart0
+
+Input clocks for peric1 clock controller:
+       - fin_pll
+       - dout_aclk_peric1_66
+       - sclk_uart1
+       - sclk_uart2
+       - sclk_uart3
+
+Input clocks for peris clock controller:
+       - fin_pll
+       - dout_aclk_peris_66
+
+Input clocks for fsys0 clock controller:
+       - fin_pll
+       - dout_aclk_fsys0_200
+       - dout_sclk_mmc2
+
+Input clocks for fsys1 clock controller:
+       - fin_pll
+       - dout_aclk_fsys1_200
+       - dout_sclk_mmc0
+       - dout_sclk_mmc1
diff --git a/Documentation/devicetree/bindings/clock/marvell,mmp2.txt b/Documentation/devicetree/bindings/clock/marvell,mmp2.txt
new file mode 100644 (file)
index 0000000..af376a0
--- /dev/null
@@ -0,0 +1,21 @@
+* Marvell MMP2 Clock Controller
+
+The MMP2 clock subsystem generates and supplies clock to various
+controllers within the MMP2 SoC.
+
+Required Properties:
+
+- compatible: should be one of the following.
+  - "marvell,mmp2-clock" - controller compatible with MMP2 SoC.
+
+- reg: physical base address of the clock subsystem and length of memory mapped
+  region. There are 3 places in SOC has clock control logic:
+  "mpmu", "apmu", "apbc". So three reg spaces need to be defined.
+
+- #clock-cells: should be 1.
+- #reset-cells: should be 1.
+
+Each clock is assigned an identifier and client nodes use this identifier
+to specify the clock which they consume.
+
+All these identifiers can be found in <dt-bindings/clock/marvell,mmp2.h>.
diff --git a/Documentation/devicetree/bindings/clock/marvell,pxa168.txt b/Documentation/devicetree/bindings/clock/marvell,pxa168.txt
new file mode 100644 (file)
index 0000000..c62eb1d
--- /dev/null
@@ -0,0 +1,21 @@
+* Marvell PXA168 Clock Controller
+
+The PXA168 clock subsystem generates and supplies clock to various
+controllers within the PXA168 SoC.
+
+Required Properties:
+
+- compatible: should be one of the following.
+  - "marvell,pxa168-clock" - controller compatible with PXA168 SoC.
+
+- reg: physical base address of the clock subsystem and length of memory mapped
+  region. There are 3 places in SOC has clock control logic:
+  "mpmu", "apmu", "apbc". So three reg spaces need to be defined.
+
+- #clock-cells: should be 1.
+- #reset-cells: should be 1.
+
+Each clock is assigned an identifier and client nodes use this identifier
+to specify the clock which they consume.
+
+All these identifiers can be found in <dt-bindings/clock/marvell,pxa168.h>.
diff --git a/Documentation/devicetree/bindings/clock/marvell,pxa910.txt b/Documentation/devicetree/bindings/clock/marvell,pxa910.txt
new file mode 100644 (file)
index 0000000..d9f41f3
--- /dev/null
@@ -0,0 +1,21 @@
+* Marvell PXA910 Clock Controller
+
+The PXA910 clock subsystem generates and supplies clock to various
+controllers within the PXA910 SoC.
+
+Required Properties:
+
+- compatible: should be one of the following.
+  - "marvell,pxa910-clock" - controller compatible with PXA910 SoC.
+
+- reg: physical base address of the clock subsystem and length of memory mapped
+  region. There are 4 places in SOC has clock control logic:
+  "mpmu", "apmu", "apbc", "apbcp". So four reg spaces need to be defined.
+
+- #clock-cells: should be 1.
+- #reset-cells: should be 1.
+
+Each clock is assigned an identifier and client nodes use this identifier
+to specify the clock which they consume.
+
+All these identifiers can be found in <dt-bindings/clock/marvell,pxa910.h>.
index 952e373178d27b10d4fca3bdffc8519d5f4531ae..054f65f9319cd71c74936dcab565901559a9d280 100644 (file)
@@ -7,11 +7,16 @@ to 64.
 Required Properties:
 
   - compatible: Must be one of the following
+    - "renesas,r8a73a4-div6-clock" for R8A73A4 (R-Mobile APE6) DIV6 clocks
+    - "renesas,r8a7740-div6-clock" for R8A7740 (R-Mobile A1) DIV6 clocks
     - "renesas,r8a7790-div6-clock" for R8A7790 (R-Car H2) DIV6 clocks
     - "renesas,r8a7791-div6-clock" for R8A7791 (R-Car M2) DIV6 clocks
+    - "renesas,sh73a0-div6-clock" for SH73A0 (SH-Mobile AG5) DIV6 clocks
     - "renesas,cpg-div6-clock" for generic DIV6 clocks
   - reg: Base address and length of the memory resource used by the DIV6 clock
-  - clocks: Reference to the parent clock
+  - clocks: Reference to the parent clock(s); either one, four, or eight
+    clocks must be specified.  For clocks with multiple parents, invalid
+    settings must be specified as "<0>".
   - #clock-cells: Must be 0
   - clock-output-names: The name of the clock as a free-form string
 
@@ -19,10 +24,11 @@ Required Properties:
 Example
 -------
 
-       sd2_clk: sd2_clk@e6150078 {
-               compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock";
-               reg = <0 0xe6150078 0 4>;
-               clocks = <&pll1_div2_clk>;
+       sdhi2_clk: sdhi2_clk@e615007c {
+               compatible = "renesas,r8a73a4-div6-clock", "renesas,cpg-div6-clock";
+               reg = <0 0xe615007c 0 4>;
+               clocks = <&pll1_div2_clk>, <&cpg_clocks R8A73A4_CLK_PLL2S>,
+                        <0>, <&extal2_clk>;
                #clock-cells = <0>;
-               clock-output-names = "sd2";
+               clock-output-names = "sdhi2ck";
        };
index a5f52238c80d5f9b9560914f59e653e772b42bea..2e18676bd4b56503ce42ec4b4d368c32041fe1d0 100644 (file)
@@ -26,11 +26,11 @@ Required Properties:
     must appear in the same order as the output clocks.
   - #clock-cells: Must be 1
   - clock-output-names: The name of the clocks as free-form strings
-  - renesas,clock-indices: Indices of the gate clocks into the group (0 to 31)
+  - clock-indices: Indices of the gate clocks into the group (0 to 31)
 
-The clocks, clock-output-names and renesas,clock-indices properties contain one
-entry per gate clock. The MSTP groups are sparsely populated. Unimplemented
-gate clocks must not be declared.
+The clocks, clock-output-names and clock-indices properties contain one entry
+per gate clock. The MSTP groups are sparsely populated. Unimplemented gate
+clocks must not be declared.
 
 
 Example
index ed116df9c3e769a2ceef9cb4d1dd1eaf44473f38..67b2b99f2b339f0a35a8fddfd9127779542ca76b 100644 (file)
@@ -10,14 +10,17 @@ Required properties:
        "allwinner,sun4i-a10-pll1-clk" - for the main PLL clock and PLL4
        "allwinner,sun6i-a31-pll1-clk" - for the main PLL clock on A31
        "allwinner,sun8i-a23-pll1-clk" - for the main PLL clock on A23
+       "allwinner,sun9i-a80-pll4-clk" - for the peripheral PLLs on A80
        "allwinner,sun4i-a10-pll5-clk" - for the PLL5 clock
        "allwinner,sun4i-a10-pll6-clk" - for the PLL6 clock
        "allwinner,sun6i-a31-pll6-clk" - for the PLL6 clock on A31
+       "allwinner,sun9i-a80-gt-clk" - for the GT bus clock on A80
        "allwinner,sun4i-a10-cpu-clk" - for the CPU multiplexer clock
        "allwinner,sun4i-a10-axi-clk" - for the AXI clock
        "allwinner,sun8i-a23-axi-clk" - for the AXI clock on A23
        "allwinner,sun4i-a10-axi-gates-clk" - for the AXI gates
        "allwinner,sun4i-a10-ahb-clk" - for the AHB clock
+       "allwinner,sun9i-a80-ahb-clk" - for the AHB bus clocks on A80
        "allwinner,sun4i-a10-ahb-gates-clk" - for the AHB gates on A10
        "allwinner,sun5i-a13-ahb-gates-clk" - for the AHB gates on A13
        "allwinner,sun5i-a10s-ahb-gates-clk" - for the AHB gates on A10s
@@ -26,24 +29,29 @@ Required properties:
        "allwinner,sun6i-a31-ahb1-mux-clk" - for the AHB1 multiplexer on A31
        "allwinner,sun6i-a31-ahb1-gates-clk" - for the AHB1 gates on A31
        "allwinner,sun8i-a23-ahb1-gates-clk" - for the AHB1 gates on A23
+       "allwinner,sun9i-a80-ahb0-gates-clk" - for the AHB0 gates on A80
+       "allwinner,sun9i-a80-ahb1-gates-clk" - for the AHB1 gates on A80
+       "allwinner,sun9i-a80-ahb2-gates-clk" - for the AHB2 gates on A80
        "allwinner,sun4i-a10-apb0-clk" - for the APB0 clock
        "allwinner,sun6i-a31-apb0-clk" - for the APB0 clock on A31
        "allwinner,sun8i-a23-apb0-clk" - for the APB0 clock on A23
+       "allwinner,sun9i-a80-apb0-clk" - for the APB0 bus clock on A80
        "allwinner,sun4i-a10-apb0-gates-clk" - for the APB0 gates on A10
        "allwinner,sun5i-a13-apb0-gates-clk" - for the APB0 gates on A13
        "allwinner,sun5i-a10s-apb0-gates-clk" - for the APB0 gates on A10s
        "allwinner,sun6i-a31-apb0-gates-clk" - for the APB0 gates on A31
        "allwinner,sun7i-a20-apb0-gates-clk" - for the APB0 gates on A20
        "allwinner,sun8i-a23-apb0-gates-clk" - for the APB0 gates on A23
+       "allwinner,sun9i-a80-apb0-gates-clk" - for the APB0 gates on A80
        "allwinner,sun4i-a10-apb1-clk" - for the APB1 clock
-       "allwinner,sun4i-a10-apb1-mux-clk" - for the APB1 clock muxing
+       "allwinner,sun9i-a80-apb1-clk" - for the APB1 bus clock on A80
        "allwinner,sun4i-a10-apb1-gates-clk" - for the APB1 gates on A10
        "allwinner,sun5i-a13-apb1-gates-clk" - for the APB1 gates on A13
        "allwinner,sun5i-a10s-apb1-gates-clk" - for the APB1 gates on A10s
        "allwinner,sun6i-a31-apb1-gates-clk" - for the APB1 gates on A31
        "allwinner,sun7i-a20-apb1-gates-clk" - for the APB1 gates on A20
        "allwinner,sun8i-a23-apb1-gates-clk" - for the APB1 gates on A23
-       "allwinner,sun6i-a31-apb2-div-clk" - for the APB2 gates on A31
+       "allwinner,sun9i-a80-apb1-gates-clk" - for the APB1 gates on A80
        "allwinner,sun6i-a31-apb2-gates-clk" - for the APB2 gates on A31
        "allwinner,sun8i-a23-apb2-gates-clk" - for the APB2 gates on A23
        "allwinner,sun5i-a13-mbus-clk" - for the MBUS clock on A13
@@ -63,8 +71,9 @@ Required properties for all clocks:
        multiplexed clocks, the list order must match the hardware
        programming order.
 - #clock-cells : from common clock binding; shall be set to 0 except for
-       "allwinner,*-gates-clk", "allwinner,sun4i-pll5-clk" and
-       "allwinner,sun4i-pll6-clk" where it shall be set to 1
+       the following compatibles where it shall be set to 1:
+       "allwinner,*-gates-clk", "allwinner,sun4i-pll5-clk",
+       "allwinner,sun4i-pll6-clk", "allwinner,sun6i-a31-pll6-clk"
 - clock-output-names : shall be the corresponding names of the outputs.
        If the clock module only has one output, the name shall be the
        module name.
@@ -79,6 +88,12 @@ Clock consumers should specify the desired clocks they use with a
 "clocks" phandle cell. Consumers that are using a gated clock should
 provide an additional ID in their clock property. This ID is the
 offset of the bit controlling this particular gate in the register.
+For the other clocks with "#clock-cells" = 1, the additional ID shall
+refer to the index of the output.
+
+For "allwinner,sun6i-a31-pll6-clk", there are 2 outputs. The first output
+is the normal PLL6 output, or "pll6". The second output is rate doubled
+PLL6, or "pll6x2".
 
 For example:
 
@@ -106,6 +121,14 @@ pll5: clk@01c20020 {
        clock-output-names = "pll5_ddr", "pll5_other";
 };
 
+pll6: clk@01c20028 {
+       #clock-cells = <1>;
+       compatible = "allwinner,sun6i-a31-pll6-clk";
+       reg = <0x01c20028 0x4>;
+       clocks = <&osc24M>;
+       clock-output-names = "pll6", "pll6x2";
+};
+
 cpu: cpu@01c20054 {
        #clock-cells = <0>;
        compatible = "allwinner,sun4i-a10-cpu-clk";
diff --git a/Documentation/devicetree/bindings/i2c/i2c-opal.txt b/Documentation/devicetree/bindings/i2c/i2c-opal.txt
new file mode 100644 (file)
index 0000000..12bc614
--- /dev/null
@@ -0,0 +1,37 @@
+Device-tree bindings for I2C OPAL driver
+----------------------------------------
+
+Most of the device node and properties layout is specific to the firmware and
+used by the firmware itself for configuring the port. From the linux
+perspective, the properties of use are "ibm,port-name" and "ibm,opal-id".
+
+Required properties:
+
+- reg: Port-id within a given master
+- compatible: must be "ibm,opal-i2c"
+- ibm,opal-id: Refers to a specific bus and used to identify it when calling
+              the relevant OPAL functions.
+- bus-frequency: Operating frequency of the i2c bus (in HZ). Informational for
+                linux, used by the FW though.
+
+Optional properties:
+- ibm,port-name: Firmware provides this name that uniquely identifies the i2c
+                port.
+
+The node contains a number of other properties that are used by the FW itself
+and depend on the specific hardware implementation. The example below depicts
+a P8 on-chip bus.
+
+Example:
+
+i2c-bus@0 {
+       reg = <0x0>;
+       bus-frequency = <0x61a80>;
+       compatible = "ibm,power8-i2c-port", "ibm,opal-i2c";
+       ibm,opal-id = <0x1>;
+       ibm,port-name = "p8_00000000_e1p0";
+       #address-cells = <0x1>;
+       phandle = <0x10000006>;
+       #size-cells = <0x0>;
+       linux,phandle = <0x10000006>;
+};
index 4472ed2ad921b74ce088643f2ca7c3cf494df30a..818518a3ff01a124acaf07ba0e8a109c5a1ce1b3 100644 (file)
@@ -7,11 +7,15 @@ that can be used in conjunction with compiler changes to check memory
 references, for those references whose compile-time normal intentions are
 usurped at runtime due to buffer overflow or underflow.
 
+You can tell if your CPU supports MPX by looking in /proc/cpuinfo:
+
+       cat /proc/cpuinfo  | grep ' mpx '
+
 For more information, please refer to Intel(R) Architecture Instruction
 Set Extensions Programming Reference, Chapter 9: Intel(R) Memory Protection
 Extensions.
 
-Note: Currently no hardware with MPX ISA is available but it is always
+Note: As of December 2014, no hardware with MPX is available but it is
 possible to use SDE (Intel(R) Software Development Emulator) instead, which
 can be downloaded from
 http://software.intel.com/en-us/articles/intel-software-development-emulator
@@ -30,9 +34,15 @@ is how we expect the compiler, application and kernel to work together.
    instrumentation as well as some setup code called early after the app
    starts. New instruction prefixes are noops for old CPUs.
 2) That setup code allocates (virtual) space for the "bounds directory",
-   points the "bndcfgu" register to the directory and notifies the kernel
-   (via the new prctl(PR_MPX_ENABLE_MANAGEMENT)) that the app will be using
-   MPX.
+   points the "bndcfgu" register to the directory (must also set the valid
+   bit) and notifies the kernel (via the new prctl(PR_MPX_ENABLE_MANAGEMENT))
+   that the app will be using MPX.  The app must be careful not to access
+   the bounds tables between the time when it populates "bndcfgu" and
+   when it calls the prctl().  This might be hard to guarantee if the app
+   is compiled with MPX.  You can add "__attribute__((bnd_legacy))" to
+   the function to disable MPX instrumentation to help guarantee this.
+   Also be careful not to call out to any other code which might be
+   MPX-instrumented.
 3) The kernel detects that the CPU has MPX, allows the new prctl() to
    succeed, and notes the location of the bounds directory. Userspace is
   expected to keep the bounds directory at that location. We note it
index 08f671dad3e935850a8eb636b0c2afb195ce1461..ddb9ac8d32b3eddc46d66bd408392ae09b637ea7 100644 (file)
@@ -2576,8 +2576,9 @@ F:        drivers/media/platform/coda/
 
 COMMON CLK FRAMEWORK
 M:     Mike Turquette <mturquette@linaro.org>
+M:     Stephen Boyd <sboyd@codeaurora.org>
 L:     linux-kernel@vger.kernel.org
-T:     git git://git.linaro.org/people/mturquette/linux.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux.git
 S:     Maintained
 F:     drivers/clk/
 X:     drivers/clk/clkdev.c
index fd80c6e9bc2367f79f47edebb15649d23d341791..ef748e17702f5109bf2678fb57f7929ef411d938 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
-PATCHLEVEL = 18
+PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc2
 NAME = Diseased Newt
 
 # *DOCUMENTATION*
@@ -481,9 +481,10 @@ asm-generic:
 # of make so .config is not included in this case either (for *config).
 
 version_h := include/generated/uapi/linux/version.h
+old_version_h := include/linux/version.h
 
 no-dot-config-targets := clean mrproper distclean \
-                        cscope gtags TAGS tags help %docs check% coccicheck \
+                        cscope gtags TAGS tags help% %docs check% coccicheck \
                         $(version_h) headers_% archheaders archscripts \
                         kernelversion %src-pkg
 
@@ -1005,6 +1006,7 @@ endef
 
 $(version_h): $(srctree)/Makefile FORCE
        $(call filechk,version.h)
+       $(Q)rm -f $(old_version_h)
 
 include/generated/utsrelease.h: include/config/kernel.release FORCE
        $(call filechk,utsrelease.h)
@@ -1036,8 +1038,6 @@ firmware_install: FORCE
 #Default location for installed headers
 export INSTALL_HDR_PATH = $(objtree)/usr
 
-hdr-inst := -rR -f $(srctree)/scripts/Makefile.headersinst obj
-
 # If we do an all arch process set dst to asm-$(hdr-arch)
 hdr-dst = $(if $(KBUILD_HEADERS), dst=include/asm-$(hdr-arch), dst=include/asm)
 
@@ -1175,7 +1175,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
                  Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
                  signing_key.priv signing_key.x509 x509.genkey         \
                  extra_certificates signing_key.x509.keyid             \
-                 signing_key.x509.signer include/linux/version.h
+                 signing_key.x509.signer
 
 # clean - Delete most, but leave enough to build external modules
 #
@@ -1235,7 +1235,7 @@ rpm: include/config/kernel.release FORCE
 # ---------------------------------------------------------------------------
 
 boards := $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*_defconfig)
-boards := $(notdir $(boards))
+boards := $(sort $(notdir $(boards)))
 board-dirs := $(dir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/*/*_defconfig))
 board-dirs := $(sort $(notdir $(board-dirs:/=)))
 
@@ -1326,7 +1326,7 @@ help-board-dirs := $(addprefix help-,$(board-dirs))
 
 help-boards: $(help-board-dirs)
 
-boards-per-dir = $(notdir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/$*/*_defconfig))
+boards-per-dir = $(sort $(notdir $(wildcard $(srctree)/arch/$(SRCARCH)/configs/$*/*_defconfig)))
 
 $(help-board-dirs): help-%:
        @echo  'Architecture specific targets ($(SRCARCH) $*):'
@@ -1581,11 +1581,6 @@ ifneq ($(cmd_files),)
   include $(cmd_files)
 endif
 
-# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.clean obj=dir
-# Usage:
-# $(Q)$(MAKE) $(clean)=dir
-clean := -f $(srctree)/scripts/Makefile.clean obj
-
 endif  # skip-makefile
 
 PHONY += FORCE
index 6a3d9a6c4497915587d4deceac8829e4736ea3f0..91bd5bd628576d6da17a10aa87afb52232087d06 100644 (file)
@@ -177,6 +177,9 @@ dtb-$(CONFIG_MACH_KIRKWOOD) += kirkwood-b3.dtb \
 dtb-$(CONFIG_ARCH_LPC32XX) += ea3250.dtb phy3250.dtb
 dtb-$(CONFIG_ARCH_MARCO) += marco-evb.dtb
 dtb-$(CONFIG_MACH_MESON6) += meson6-atv1200.dtb
+dtb-$(CONFIG_ARCH_MMP) += pxa168-aspenite.dtb \
+       pxa910-dkb.dtb \
+       mmp2-brownstone.dtb
 dtb-$(CONFIG_ARCH_MOXART) += moxart-uc7112lx.dtb
 dtb-$(CONFIG_ARCH_MXC) += \
        imx1-ads.dtb \
index 7f70a39459f649d7051b5423c754e356686762db..350208c5e1ed2f6d27d192d1c256ab58db5ecb27 100644 (file)
@@ -8,7 +8,7 @@
  */
 
 /dts-v1/;
-/include/ "mmp2.dtsi"
+#include "mmp2.dtsi"
 
 / {
        model = "Marvell MMP2 Brownstone Development Board";
index 4e8b08c628c7ee405efd864f743cbadc8a617321..766bbb8495b60d796ccd857c4965ab4f8d09721c 100644 (file)
@@ -7,7 +7,8 @@
 *  publishhed by the Free Software Foundation (sic: "published")
  */
 
-/include/ "skeleton.dtsi"
+#include "skeleton.dtsi"
+#include <dt-bindings/clock/marvell,mmp2.h>
 
 / {
        aliases {
                                compatible = "mrvl,mmp-uart";
                                reg = <0xd4030000 0x1000>;
                                interrupts = <27>;
+                               clocks = <&soc_clocks MMP2_CLK_UART0>;
+                               resets = <&soc_clocks MMP2_CLK_UART0>;
                                status = "disabled";
                        };
 
                                compatible = "mrvl,mmp-uart";
                                reg = <0xd4017000 0x1000>;
                                interrupts = <28>;
+                               clocks = <&soc_clocks MMP2_CLK_UART1>;
+                               resets = <&soc_clocks MMP2_CLK_UART1>;
                                status = "disabled";
                        };
 
                                compatible = "mrvl,mmp-uart";
                                reg = <0xd4018000 0x1000>;
                                interrupts = <24>;
+                               clocks = <&soc_clocks MMP2_CLK_UART2>;
+                               resets = <&soc_clocks MMP2_CLK_UART2>;
                                status = "disabled";
                        };
 
                                compatible = "mrvl,mmp-uart";
                                reg = <0xd4016000 0x1000>;
                                interrupts = <46>;
+                               clocks = <&soc_clocks MMP2_CLK_UART3>;
+                               resets = <&soc_clocks MMP2_CLK_UART3>;
                                status = "disabled";
                        };
 
                                #gpio-cells = <2>;
                                interrupts = <49>;
                                interrupt-names = "gpio_mux";
+                               clocks = <&soc_clocks MMP2_CLK_GPIO>;
+                               resets = <&soc_clocks MMP2_CLK_GPIO>;
                                interrupt-controller;
                                #interrupt-cells = <1>;
                                ranges;
                                compatible = "mrvl,mmp-twsi";
                                reg = <0xd4011000 0x1000>;
                                interrupts = <7>;
+                               clocks = <&soc_clocks MMP2_CLK_TWSI0>;
+                               resets = <&soc_clocks MMP2_CLK_TWSI0>;
                                #address-cells = <1>;
                                #size-cells = <0>;
                                mrvl,i2c-fast-mode;
                                compatible = "mrvl,mmp-twsi";
                                reg = <0xd4025000 0x1000>;
                                interrupts = <58>;
+                               clocks = <&soc_clocks MMP2_CLK_TWSI1>;
+                               resets = <&soc_clocks MMP2_CLK_TWSI1>;
                                status = "disabled";
                        };
 
                                interrupts = <1 0>;
                                interrupt-names = "rtc 1Hz", "rtc alarm";
                                interrupt-parent = <&intcmux5>;
+                               clocks = <&soc_clocks MMP2_CLK_RTC>;
+                               resets = <&soc_clocks MMP2_CLK_RTC>;
                                status = "disabled";
                        };
                };
+
+               soc_clocks: clocks{
+                       compatible = "marvell,mmp2-clock";
+                       reg = <0xd4050000 0x1000>,
+                             <0xd4282800 0x400>,
+                             <0xd4015000 0x1000>;
+                       reg-names = "mpmu", "apmu", "apbc";
+                       #clock-cells = <1>;
+                       #reset-cells = <1>;
+               };
        };
 };
index e762facb3fa434473e2a1b53ca0a3c68f323c20a..0a988b3fb248ee46658bf1d65780f3f01b4582be 100644 (file)
@@ -8,7 +8,7 @@
  */
 
 /dts-v1/;
-/include/ "pxa168.dtsi"
+#include "pxa168.dtsi"
 
 / {
        model = "Marvell PXA168 Aspenite Development Board";
index 975dad21ac3890d4b81bb23a0d1bf2962a831e09..b899e25cbb1be973c60dc8a8d8c220c02cf3c039 100644 (file)
@@ -7,7 +7,8 @@
 *  publishhed by the Free Software Foundation (sic: "published")
  */
 
-/include/ "skeleton.dtsi"
+#include "skeleton.dtsi"
+#include <dt-bindings/clock/marvell,pxa168.h>
 
 / {
        aliases {
@@ -59,6 +60,8 @@
                                compatible = "mrvl,mmp-uart";
                                reg = <0xd4017000 0x1000>;
                                interrupts = <27>;
+                               clocks = <&soc_clocks PXA168_CLK_UART0>;
+                               resets = <&soc_clocks PXA168_CLK_UART0>;
                                status = "disabled";
                        };
 
@@ -66,6 +69,8 @@
                                compatible = "mrvl,mmp-uart";
                                reg = <0xd4018000 0x1000>;
                                interrupts = <28>;
+                               clocks = <&soc_clocks PXA168_CLK_UART1>;
+                               resets = <&soc_clocks PXA168_CLK_UART1>;
                                status = "disabled";
                        };
 
@@ -73,6 +78,8 @@
                                compatible = "mrvl,mmp-uart";
                                reg = <0xd4026000 0x1000>;
                                interrupts = <29>;
+                               clocks = <&soc_clocks PXA168_CLK_UART2>;
+                               resets = <&soc_clocks PXA168_CLK_UART2>;
                                status = "disabled";
                        };
 
@@ -84,6 +91,8 @@
                                gpio-controller;
                                #gpio-cells = <2>;
                                interrupts = <49>;
+                               clocks = <&soc_clocks PXA168_CLK_GPIO>;
+                               resets = <&soc_clocks PXA168_CLK_GPIO>;
                                interrupt-names = "gpio_mux";
                                interrupt-controller;
                                #interrupt-cells = <1>;
                                compatible = "mrvl,mmp-twsi";
                                reg = <0xd4011000 0x1000>;
                                interrupts = <7>;
+                               clocks = <&soc_clocks PXA168_CLK_TWSI0>;
+                               resets = <&soc_clocks PXA168_CLK_TWSI0>;
                                mrvl,i2c-fast-mode;
                                status = "disabled";
                        };
                                compatible = "mrvl,mmp-twsi";
                                reg = <0xd4025000 0x1000>;
                                interrupts = <58>;
+                               clocks = <&soc_clocks PXA168_CLK_TWSI1>;
+                               resets = <&soc_clocks PXA168_CLK_TWSI1>;
                                status = "disabled";
                        };
 
                                reg = <0xd4010000 0x1000>;
                                interrupts = <5 6>;
                                interrupt-names = "rtc 1Hz", "rtc alarm";
+                               clocks = <&soc_clocks PXA168_CLK_RTC>;
+                               resets = <&soc_clocks PXA168_CLK_RTC>;
                                status = "disabled";
                        };
                };
+
+               soc_clocks: clocks{
+                       compatible = "marvell,pxa168-clock";
+                       reg = <0xd4050000 0x1000>,
+                             <0xd4282800 0x400>,
+                             <0xd4015000 0x1000>;
+                       reg-names = "mpmu", "apmu", "apbc";
+                       #clock-cells = <1>;
+                       #reset-cells = <1>;
+               };
        };
 };
index 595492aa505372047aab605aff58b043f16896f6..c82f2810ec73c2274b9c9f4a996cf5a31e530379 100644 (file)
@@ -8,7 +8,7 @@
  */
 
 /dts-v1/;
-/include/ "pxa910.dtsi"
+#include "pxa910.dtsi"
 
 / {
        model = "Marvell PXA910 DKB Development Board";
index 0247c622f580728f77f31dbb36d555d310f524d0..0868f6729be1eaf60fc6df14887f449e92f750c6 100644 (file)
@@ -7,7 +7,8 @@
 *  publishhed by the Free Software Foundation (sic: "published")
  */
 
-/include/ "skeleton.dtsi"
+#include "skeleton.dtsi"
+#include <dt-bindings/clock/marvell,pxa910.h>
 
 / {
        aliases {
@@ -71,6 +72,8 @@
                                compatible = "mrvl,mmp-uart";
                                reg = <0xd4017000 0x1000>;
                                interrupts = <27>;
+                               clocks = <&soc_clocks PXA910_CLK_UART0>;
+                               resets = <&soc_clocks PXA910_CLK_UART0>;
                                status = "disabled";
                        };
 
@@ -78,6 +81,8 @@
                                compatible = "mrvl,mmp-uart";
                                reg = <0xd4018000 0x1000>;
                                interrupts = <28>;
+                               clocks = <&soc_clocks PXA910_CLK_UART1>;
+                               resets = <&soc_clocks PXA910_CLK_UART1>;
                                status = "disabled";
                        };
 
@@ -85,6 +90,8 @@
                                compatible = "mrvl,mmp-uart";
                                reg = <0xd4036000 0x1000>;
                                interrupts = <59>;
+                               clocks = <&soc_clocks PXA910_CLK_UART2>;
+                               resets = <&soc_clocks PXA910_CLK_UART2>;
                                status = "disabled";
                        };
 
                                #gpio-cells = <2>;
                                interrupts = <49>;
                                interrupt-names = "gpio_mux";
+                               clocks = <&soc_clocks PXA910_CLK_GPIO>;
+                               resets = <&soc_clocks PXA910_CLK_GPIO>;
                                interrupt-controller;
                                #interrupt-cells = <1>;
                                ranges;
                                #size-cells = <0>;
                                reg = <0xd4011000 0x1000>;
                                interrupts = <7>;
+                               clocks = <&soc_clocks PXA910_CLK_TWSI0>;
+                               resets = <&soc_clocks PXA910_CLK_TWSI0>;
                                mrvl,i2c-fast-mode;
                                status = "disabled";
                        };
                                #size-cells = <0>;
                                reg = <0xd4037000 0x1000>;
                                interrupts = <54>;
+                               clocks = <&soc_clocks PXA910_CLK_TWSI1>;
+                               resets = <&soc_clocks PXA910_CLK_TWSI1>;
                                status = "disabled";
                        };
 
                                reg = <0xd4010000 0x1000>;
                                interrupts = <5 6>;
                                interrupt-names = "rtc 1Hz", "rtc alarm";
+                               clocks = <&soc_clocks PXA910_CLK_RTC>;
+                               resets = <&soc_clocks PXA910_CLK_RTC>;
                                status = "disabled";
                        };
                };
+
+               soc_clocks: clocks{
+                       compatible = "marvell,pxa910-clock";
+                       reg = <0xd4050000 0x1000>,
+                             <0xd4282800 0x400>,
+                             <0xd4015000 0x1000>,
+                             <0xd403b000 0x1000>;
+                       reg-names = "mpmu", "apmu", "apbc", "apbcp";
+                       #clock-cells = <1>;
+                       #reset-cells = <1>;
+               };
        };
 };
index e3ab942fd1488c974824dc05445ad970158dbc5b..7b4099fcf81788714def505ff009e2b7a4948db2 100644 (file)
                                "apb0_ir1", "apb0_keypad";
                };
 
-               apb1_mux: apb1_mux@01c20058 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-apb1-mux-clk";
-                       reg = <0x01c20058 0x4>;
-                       clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
-                       clock-output-names = "apb1_mux";
-               };
-
-               apb1: apb1@01c20058 {
+               apb1: clk@01c20058 {
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-apb1-clk";
                        reg = <0x01c20058 0x4>;
-                       clocks = <&apb1_mux>;
+                       clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
                        clock-output-names = "apb1";
                };
 
index 81ad4b94e812c7f16e1e885cfff7181249d5f61a..1b76667f3182694ffb7d055cb8f8e7230809a0bf 100644 (file)
                                "apb0_ir", "apb0_keypad";
                };
 
-               apb1_mux: apb1_mux@01c20058 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-apb1-mux-clk";
-                       reg = <0x01c20058 0x4>;
-                       clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
-                       clock-output-names = "apb1_mux";
-               };
-
-               apb1: apb1@01c20058 {
+               apb1: clk@01c20058 {
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-apb1-clk";
                        reg = <0x01c20058 0x4>;
-                       clocks = <&apb1_mux>;
+                       clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
                        clock-output-names = "apb1";
                };
 
index b131068f4f351ca92e7df07d63641cec143c5fc2..c35217ea1f6473653b4d67ce953f3c761fa043a0 100644 (file)
                        clock-output-names = "apb0_codec", "apb0_pio", "apb0_ir";
                };
 
-               apb1_mux: apb1_mux@01c20058 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-apb1-mux-clk";
-                       reg = <0x01c20058 0x4>;
-                       clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
-                       clock-output-names = "apb1_mux";
-               };
-
-               apb1: apb1@01c20058 {
+               apb1: clk@01c20058 {
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-apb1-clk";
                        reg = <0x01c20058 0x4>;
-                       clocks = <&apb1_mux>;
+                       clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
                        clock-output-names = "apb1";
                };
 
index a400172a8a52255c6ed7bd4226e3f9ee016d2dfc..f47156b6572bbaf09872686b70f09268200c3cc9 100644 (file)
                                        "apb1_daudio1";
                };
 
-               apb2_mux: apb2_mux@01c20058 {
+               apb2: clk@01c20058 {
                        #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-apb1-mux-clk";
+                       compatible = "allwinner,sun4i-a10-apb1-clk";
                        reg = <0x01c20058 0x4>;
                        clocks = <&osc32k>, <&osc24M>, <&pll6 0>, <&pll6 0>;
-                       clock-output-names = "apb2_mux";
-               };
-
-               apb2: apb2@01c20058 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun6i-a31-apb2-div-clk";
-                       reg = <0x01c20058 0x4>;
-                       clocks = <&apb2_mux>;
                        clock-output-names = "apb2";
                };
 
index 82a524ce28ad1502c5aee93accdb649aeede8821..e21ce5992d565c348ae3b798ad232cd8f6c16c6b 100644 (file)
                                "apb0_iis2", "apb0_keypad";
                };
 
-               apb1_mux: apb1_mux@01c20058 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-apb1-mux-clk";
-                       reg = <0x01c20058 0x4>;
-                       clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
-                       clock-output-names = "apb1_mux";
-               };
-
-               apb1: apb1@01c20058 {
+               apb1: clk@01c20058 {
                        #clock-cells = <0>;
                        compatible = "allwinner,sun4i-a10-apb1-clk";
                        reg = <0x01c20058 0x4>;
-                       clocks = <&apb1_mux>;
+                       clocks = <&osc24M>, <&pll6 1>, <&osc32k>;
                        clock-output-names = "apb1";
                };
 
index 6086adbf9d749adf1fb2040fb76675e84292bab9..0746cd1024d7a73b32bcbf6002954a4859d77ee5 100644 (file)
                                        "apb1_daudio0", "apb1_daudio1";
                };
 
-               apb2_mux: apb2_mux_clk@01c20058 {
+               apb2clk@01c20058 {
                        #clock-cells = <0>;
-                       compatible = "allwinner,sun4i-a10-apb1-mux-clk";
+                       compatible = "allwinner,sun4i-a10-apb1-clk";
                        reg = <0x01c20058 0x4>;
                        clocks = <&osc32k>, <&osc24M>, <&pll6>, <&pll6>;
-                       clock-output-names = "apb2_mux";
-               };
-
-               apb2: apb2_clk@01c20058 {
-                       #clock-cells = <0>;
-                       compatible = "allwinner,sun6i-a31-apb2-div-clk";
-                       reg = <0x01c20058 0x4>;
-                       clocks = <&apb2_mux>;
                        clock-output-names = "apb2";
                };
 
index db81d8ce4c03bb1bc175a4c9e3e2fbd0d5013566..9e9a72e3d30faddbca84596d8bb0feae9899d402 100644 (file)
@@ -33,7 +33,7 @@ CONFIG_ARM_APPENDED_DTB=y
 CONFIG_VFP=y
 CONFIG_NEON=y
 CONFIG_BINFMT_MISC=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index d9675c68a39925e1e496f8112601db3d84e7119e..5666e3700a8218142081bd932fe4e92ee3cfcc92 100644 (file)
@@ -43,7 +43,7 @@ CONFIG_KEXEC=y
 CONFIG_VFP=y
 CONFIG_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index 83a87e48901c105232b25cd89ff774bc72f7458c..7117662bab2ecf5374c9fb16b623e8dffc8ef4ae 100644 (file)
@@ -39,7 +39,7 @@ CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=y
index 1dde5daa84f9fd4a835ac77ad06b34814525e82d..3125e00f05ab8f7a7448da8212e031ddfce8d4aa 100644 (file)
@@ -29,7 +29,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0
 CONFIG_ARM_APPENDED_DTB=y
 CONFIG_VFP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index 759f9b0053e294a0cee0f9ce26f63974daa84850..235842c9ba96ed2c4a7176b26032d5da8f89949c 100644 (file)
@@ -49,7 +49,7 @@ CONFIG_CPU_FREQ_GOV_PERFORMANCE=m
 CONFIG_CPU_FREQ_GOV_POWERSAVE=m
 CONFIG_CPU_FREQ_GOV_ONDEMAND=m
 CONFIG_CPU_IDLE=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index c4199072902402f542487b584289eb6245b76a4f..5ef14de00a29ba2f433fc93a47d95912569817c8 100644 (file)
@@ -27,7 +27,7 @@ CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
 CONFIG_VFP=y
 CONFIG_NEON=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index eb440aae42833f6c280c36f9e8d6e56fabd331ab..ea316c4b890efadb31df9f90df5ae382c94d4228 100644 (file)
@@ -39,7 +39,6 @@ CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
 CONFIG_PM=y
 CONFIG_APM_EMULATION=y
-CONFIG_PM_RUNTIME=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index 1fe3621faf65694fb0b8f3533b88754e29812c74..112543665dd7cc795a28cfd3468e3f73de1cac06 100644 (file)
@@ -18,7 +18,7 @@ CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_NEON=y
 CONFIG_ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index 182e54692664e7771a9a86f5dc55b3a3daee4333..18e59feaa3071593936ca4f0ff3d3b021eea9b08 100644 (file)
@@ -31,7 +31,6 @@ CONFIG_BINFMT_AOUT=m
 CONFIG_BINFMT_MISC=m
 CONFIG_PM=y
 CONFIG_APM_EMULATION=y
-CONFIG_PM_RUNTIME=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index f707cd2691cf3986249f6ba08e0c5aab8a8c71e9..7c2075a07ebadbf8593a427728dc6c6ead7a92a9 100644 (file)
@@ -54,7 +54,7 @@ CONFIG_ARM_IMX6Q_CPUFREQ=y
 CONFIG_VFP=y
 CONFIG_NEON=y
 CONFIG_BINFMT_MISC=m
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_PM_DEBUG=y
 CONFIG_PM_TEST_SUSPEND=y
 CONFIG_NET=y
index 20a3ff99fae28f3af1107514e42bf80eceef8457..a2067cbfe173b37a0579e5825fa4e879cb289745 100644 (file)
@@ -30,7 +30,7 @@ CONFIG_HIGHMEM=y
 CONFIG_VFP=y
 CONFIG_NEON=y
 # CONFIG_SUSPEND is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index 8cb115d74fdf4e37a483a69a2e926f794a4b9a4d..5d63fc5d2d48c9b2f74de7327b3204373e87c94c 100644 (file)
@@ -43,7 +43,7 @@ CONFIG_KEXEC=y
 CONFIG_VFP=y
 CONFIG_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index 929c571ea29b06447aee955b903f24d729b55e39..a82afc916a89fa6a3a02c7ae3b595c9d69cb6cfa 100644 (file)
@@ -37,7 +37,7 @@ CONFIG_AUTO_ZRELADDR=y
 CONFIG_VFP=y
 CONFIG_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index 57ececba2ae6489e90198dd2d0da1592fe5310c8..05a529311b4d964ac44a099c169f9a3f93c0e3f6 100644 (file)
@@ -28,7 +28,6 @@ CONFIG_KEXEC=y
 CONFIG_VFP=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
 CONFIG_PM=y
-CONFIG_PM_RUNTIME=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index ff91630d34e1844001178aa1843e6364addbb8dc..3c8b6d823189e12c638559be280e63b501157414 100644 (file)
@@ -33,7 +33,7 @@ CONFIG_ARM_APPENDED_DTB=y
 CONFIG_VFP=y
 CONFIG_KEXEC=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index 115cda9f32606a0e3ca9afce28d2632e37f3d1f3..a7dce674f1be073558dd062d5d29b32e331dba9d 100644 (file)
@@ -63,7 +63,6 @@ CONFIG_FPE_NWFPE=y
 CONFIG_BINFMT_MISC=y
 CONFIG_PM=y
 # CONFIG_SUSPEND is not set
-CONFIG_PM_RUNTIME=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index 23591dba47a04ac2fe5efdd55f493551cfd63538..f610230b9c1fa534cdc2b60cd97d410abaccd4ad 100644 (file)
@@ -18,7 +18,7 @@ CONFIG_PREEMPT=y
 CONFIG_AEABI=y
 CONFIG_KEXEC=y
 CONFIG_BINFMT_MISC=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_BLK_DEV_LOOP=y
 CONFIG_BLK_DEV_RAM=y
index b58fb32770a0e6f5468da385babb2dd5629e5647..afa24799477abdc930b4c0545904f06c9fce9d20 100644 (file)
@@ -32,7 +32,7 @@ CONFIG_VFP=y
 CONFIG_NEON=y
 CONFIG_KERNEL_MODE_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_PM_DEBUG=y
 CONFIG_PM_ADVANCED_DEBUG=y
 CONFIG_NET=y
index df2c0f514b0a942fc2f0882d1642e946a5e2cb6f..3df6ca0c1d1fecc615be5e504a83454d97f00188 100644 (file)
@@ -39,7 +39,7 @@ CONFIG_KEXEC=y
 CONFIG_VFP=y
 CONFIG_NEON=y
 # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index f7ac0379850fc8d9ff9328dbd85b705bcb893b7f..7a342d2780a8e7ffbdf246a46eb55a66e7d3e3b8 100644 (file)
@@ -11,7 +11,7 @@ CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_VFP=y
 CONFIG_NEON=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index 40750f93aa83afc44fd1b27caccf5861498488db..3ea9c3377ccbd263f9f5f0c0285ce8ae31dcbc76 100644 (file)
@@ -46,7 +46,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
 CONFIG_CPU_IDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index d219d6a43238c6e354639500af4e5a9d56ff8714..6a1c9898fd031e8eef892ab4ccd6e251b3479249 100644 (file)
@@ -25,7 +25,7 @@ CONFIG_CPU_IDLE=y
 CONFIG_ARM_U8500_CPUIDLE=y
 CONFIG_VFP=y
 CONFIG_NEON=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index 9e7a256396904ba856a00d9dc99ae86d6ccd6588..1bfaa7bfc3924b060d6b0420a00cda8f15adce47 100644 (file)
@@ -16,7 +16,7 @@ CONFIG_ARM_APPENDED_DTB=y
 CONFIG_ARM_ATAG_DTB_COMPAT=y
 CONFIG_VFP=y
 CONFIG_NEON=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_UNIX=y
 CONFIG_INET=y
index ac4bfae26702b0be3333c1184f1ac552f6c0eddd..0fa418463f49e95696c6d6b7ea9ac8d3d2798647 100644 (file)
@@ -120,12 +120,12 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-       return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
+       return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
-       struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
+       struct __raw_tickets tickets = READ_ONCE(lock->tickets);
        return (tickets.next - tickets.owner) > 1;
 }
 #define arch_spin_is_contended arch_spin_is_contended
index ebdba87b96711a4a2a3970bcd824253ea9273327..fdbfadf00c84b492bb032f3ca47cf94f78f9c0d7 100644 (file)
@@ -86,11 +86,12 @@ config MACH_GPLUGD
 
 config MACH_MMP_DT
        bool "Support MMP (ARMv5) platforms from device tree"
-       select CPU_PXA168
-       select CPU_PXA910
        select USE_OF
        select PINCTRL
        select PINCTRL_SINGLE
+       select COMMON_CLK
+       select ARCH_HAS_RESET_CONTROLLER
+       select CPU_MOHAWK
        help
          Include support for Marvell MMP2 based platforms using
          the device tree. Needn't select any other machine while
@@ -99,10 +100,12 @@ config MACH_MMP_DT
 config MACH_MMP2_DT
        bool "Support MMP2 (ARMv7) platforms from device tree"
        depends on !CPU_MOHAWK
-       select CPU_MMP2
        select USE_OF
        select PINCTRL
        select PINCTRL_SINGLE
+       select COMMON_CLK
+       select ARCH_HAS_RESET_CONTROLLER
+       select CPU_PJ4
        help
          Include support for Marvell MMP2 based platforms using
          the device tree.
@@ -111,21 +114,18 @@ endmenu
 
 config CPU_PXA168
        bool
-       select COMMON_CLK
        select CPU_MOHAWK
        help
          Select code specific to PXA168
 
 config CPU_PXA910
        bool
-       select COMMON_CLK
        select CPU_MOHAWK
        help
          Select code specific to PXA910
 
 config CPU_MMP2
        bool
-       select COMMON_CLK
        select CPU_PJ4
        help
          Select code specific to MMP2. MMP2 is ARMv7 compatible.
index cca529ceecb758101f0120faa547c2790b004e80..b2296c9309b87659566992548a2a177888f1cf03 100644 (file)
 
 #include <linux/irqchip.h>
 #include <linux/of_platform.h>
+#include <linux/clk-provider.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
+#include <asm/hardware/cache-tauros2.h>
 
 #include "common.h"
 
 extern void __init mmp_dt_init_timer(void);
 
-static const struct of_dev_auxdata pxa168_auxdata_lookup[] __initconst = {
-       OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4017000, "pxa2xx-uart.0", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4018000, "pxa2xx-uart.1", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4026000, "pxa2xx-uart.2", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4011000, "pxa2xx-i2c.0", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4025000, "pxa2xx-i2c.1", NULL),
-       OF_DEV_AUXDATA("marvell,mmp-gpio", 0xd4019000, "mmp-gpio", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-rtc", 0xd4010000, "sa1100-rtc", NULL),
-       {}
+static const char *pxa168_dt_board_compat[] __initdata = {
+       "mrvl,pxa168-aspenite",
+       NULL,
 };
 
-static const struct of_dev_auxdata pxa910_auxdata_lookup[] __initconst = {
-       OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4017000, "pxa2xx-uart.0", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4018000, "pxa2xx-uart.1", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4036000, "pxa2xx-uart.2", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4011000, "pxa2xx-i2c.0", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4037000, "pxa2xx-i2c.1", NULL),
-       OF_DEV_AUXDATA("marvell,mmp-gpio", 0xd4019000, "mmp-gpio", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-rtc", 0xd4010000, "sa1100-rtc", NULL),
-       {}
+static const char *pxa910_dt_board_compat[] __initdata = {
+       "mrvl,pxa910-dkb",
+       NULL,
 };
 
-static void __init pxa168_dt_init(void)
-{
-       of_platform_populate(NULL, of_default_bus_match_table,
-                            pxa168_auxdata_lookup, NULL);
-}
-
-static void __init pxa910_dt_init(void)
+static void __init mmp_init_time(void)
 {
-       of_platform_populate(NULL, of_default_bus_match_table,
-                            pxa910_auxdata_lookup, NULL);
+#ifdef CONFIG_CACHE_TAUROS2
+       tauros2_init(0);
+#endif
+       mmp_dt_init_timer();
+       of_clk_init(NULL);
 }
 
-static const char *mmp_dt_board_compat[] __initdata = {
-       "mrvl,pxa168-aspenite",
-       "mrvl,pxa910-dkb",
-       NULL,
-};
-
 DT_MACHINE_START(PXA168_DT, "Marvell PXA168 (Device Tree Support)")
        .map_io         = mmp_map_io,
-       .init_time      = mmp_dt_init_timer,
-       .init_machine   = pxa168_dt_init,
-       .dt_compat      = mmp_dt_board_compat,
+       .init_time      = mmp_init_time,
+       .dt_compat      = pxa168_dt_board_compat,
 MACHINE_END
 
 DT_MACHINE_START(PXA910_DT, "Marvell PXA910 (Device Tree Support)")
        .map_io         = mmp_map_io,
-       .init_time      = mmp_dt_init_timer,
-       .init_machine   = pxa910_dt_init,
-       .dt_compat      = mmp_dt_board_compat,
+       .init_time      = mmp_init_time,
+       .dt_compat      = pxa910_dt_board_compat,
 MACHINE_END
index 023cb453f157ff621110d6c17b6f0cc185b8d8ba..998c0f533abc842659b7f4bcb0d05e2a873a9b58 100644 (file)
 #include <linux/io.h>
 #include <linux/irqchip.h>
 #include <linux/of_platform.h>
+#include <linux/clk-provider.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
+#include <asm/hardware/cache-tauros2.h>
 
 #include "common.h"
 
 extern void __init mmp_dt_init_timer(void);
 
-static const struct of_dev_auxdata mmp2_auxdata_lookup[] __initconst = {
-       OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4030000, "pxa2xx-uart.0", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4017000, "pxa2xx-uart.1", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4018000, "pxa2xx-uart.2", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-uart", 0xd4016000, "pxa2xx-uart.3", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4011000, "pxa2xx-i2c.0", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-twsi", 0xd4025000, "pxa2xx-i2c.1", NULL),
-       OF_DEV_AUXDATA("marvell,mmp-gpio", 0xd4019000, "mmp2-gpio", NULL),
-       OF_DEV_AUXDATA("mrvl,mmp-rtc", 0xd4010000, "sa1100-rtc", NULL),
-       {}
-};
-
-static void __init mmp2_dt_init(void)
+static void __init mmp_init_time(void)
 {
-       of_platform_populate(NULL, of_default_bus_match_table,
-                            mmp2_auxdata_lookup, NULL);
+#ifdef CONFIG_CACHE_TAUROS2
+       tauros2_init(0);
+#endif
+       mmp_dt_init_timer();
+       of_clk_init(NULL);
 }
 
 static const char *mmp2_dt_board_compat[] __initdata = {
@@ -44,7 +37,6 @@ static const char *mmp2_dt_board_compat[] __initdata = {
 
 DT_MACHINE_START(MMP2_DT, "Marvell MMP2 (Device Tree Support)")
        .map_io         = mmp_map_io,
-       .init_time      = mmp_dt_init_timer,
-       .init_machine   = mmp2_dt_init,
+       .init_time      = mmp_init_time,
        .dt_compat      = mmp2_dt_board_compat,
 MACHINE_END
index f0edec199cd4b443ac4590d274a03037341c7855..6ab656cc4f1629e3336f52a18d32b0aa4cb613b7 100644 (file)
@@ -15,7 +15,7 @@ config ARCH_OMAP3
        select ARM_CPU_SUSPEND if PM
        select OMAP_INTERCONNECT
        select PM_OPP if PM
-       select PM_RUNTIME if CPU_IDLE
+       select PM if CPU_IDLE
        select SOC_HAS_OMAP2_SDRC
 
 config ARCH_OMAP4
@@ -32,7 +32,7 @@ config ARCH_OMAP4
        select PL310_ERRATA_588369 if CACHE_L2X0
        select PL310_ERRATA_727915 if CACHE_L2X0
        select PM_OPP if PM
-       select PM_RUNTIME if CPU_IDLE
+       select PM if CPU_IDLE
        select ARM_ERRATA_754322
        select ARM_ERRATA_775420
 
@@ -103,7 +103,7 @@ config ARCH_OMAP2PLUS_TYPICAL
        select I2C_OMAP
        select MENELAUS if ARCH_OMAP2
        select NEON if CPU_V7
-       select PM_RUNTIME
+       select PM
        select REGULATOR
        select TWL4030_CORE if ARCH_OMAP3 || ARCH_OMAP4
        select TWL4030_POWER if ARCH_OMAP3 || ARCH_OMAP4
index 5c5ebb4db5f73031b5ba25a960e8efee5ec885fb..644ff3231bb8d1975c051f248e1d78c4007741cb 100644 (file)
@@ -111,6 +111,7 @@ static struct clk dpll3_ck;
 
 static const char *dpll3_ck_parent_names[] = {
        "sys_ck",
+       "sys_ck",
 };
 
 static const struct clk_ops dpll3_ck_ops = {
@@ -733,6 +734,10 @@ static const char *corex2_fck_parent_names[] = {
 DEFINE_STRUCT_CLK_HW_OMAP(corex2_fck, NULL);
 DEFINE_STRUCT_CLK(corex2_fck, corex2_fck_parent_names, core_ck_ops);
 
+static const char *cpefuse_fck_parent_names[] = {
+       "sys_ck",
+};
+
 static struct clk cpefuse_fck;
 
 static struct clk_hw_omap cpefuse_fck_hw = {
@@ -744,7 +749,7 @@ static struct clk_hw_omap cpefuse_fck_hw = {
        .clkdm_name     = "core_l4_clkdm",
 };
 
-DEFINE_STRUCT_CLK(cpefuse_fck, dpll3_ck_parent_names, aes2_ick_ops);
+DEFINE_STRUCT_CLK(cpefuse_fck, cpefuse_fck_parent_names, aes2_ick_ops);
 
 static struct clk csi2_96m_fck;
 
@@ -775,7 +780,7 @@ static struct clk_hw_omap d2d_26m_fck_hw = {
        .clkdm_name     = "d2d_clkdm",
 };
 
-DEFINE_STRUCT_CLK(d2d_26m_fck, dpll3_ck_parent_names, aes2_ick_ops);
+DEFINE_STRUCT_CLK(d2d_26m_fck, cpefuse_fck_parent_names, aes2_ick_ops);
 
 static struct clk des1_ick;
 
@@ -1046,7 +1051,7 @@ static struct clk_hw_omap dss2_alwon_fck_hw = {
        .clkdm_name     = "dss_clkdm",
 };
 
-DEFINE_STRUCT_CLK(dss2_alwon_fck, dpll3_ck_parent_names, aes2_ick_ops);
+DEFINE_STRUCT_CLK(dss2_alwon_fck, cpefuse_fck_parent_names, aes2_ick_ops);
 
 static struct clk dss_96m_fck;
 
@@ -1368,7 +1373,7 @@ DEFINE_STRUCT_CLK(gpio1_dbck, gpio1_dbck_parent_names, aes2_ick_ops);
 static struct clk wkup_l4_ick;
 
 DEFINE_STRUCT_CLK_HW_OMAP(wkup_l4_ick, "wkup_clkdm");
-DEFINE_STRUCT_CLK(wkup_l4_ick, dpll3_ck_parent_names, core_l4_ick_ops);
+DEFINE_STRUCT_CLK(wkup_l4_ick, cpefuse_fck_parent_names, core_l4_ick_ops);
 
 static struct clk gpio1_ick;
 
@@ -1862,7 +1867,7 @@ static struct clk_hw_omap hecc_ck_hw = {
        .clkdm_name     = "core_l3_clkdm",
 };
 
-DEFINE_STRUCT_CLK(hecc_ck, dpll3_ck_parent_names, aes2_ick_ops);
+DEFINE_STRUCT_CLK(hecc_ck, cpefuse_fck_parent_names, aes2_ick_ops);
 
 static struct clk hsotgusb_fck_am35xx;
 
@@ -1875,7 +1880,7 @@ static struct clk_hw_omap hsotgusb_fck_am35xx_hw = {
        .clkdm_name     = "core_l3_clkdm",
 };
 
-DEFINE_STRUCT_CLK(hsotgusb_fck_am35xx, dpll3_ck_parent_names, aes2_ick_ops);
+DEFINE_STRUCT_CLK(hsotgusb_fck_am35xx, cpefuse_fck_parent_names, aes2_ick_ops);
 
 static struct clk hsotgusb_ick_3430es1;
 
@@ -2411,7 +2416,7 @@ static struct clk_hw_omap modem_fck_hw = {
        .clkdm_name     = "d2d_clkdm",
 };
 
-DEFINE_STRUCT_CLK(modem_fck, dpll3_ck_parent_names, aes2_ick_ops);
+DEFINE_STRUCT_CLK(modem_fck, cpefuse_fck_parent_names, aes2_ick_ops);
 
 static struct clk mspro_fck;
 
@@ -2710,7 +2715,7 @@ static struct clk_hw_omap sr1_fck_hw = {
        .clkdm_name     = "wkup_clkdm",
 };
 
-DEFINE_STRUCT_CLK(sr1_fck, dpll3_ck_parent_names, aes2_ick_ops);
+DEFINE_STRUCT_CLK(sr1_fck, cpefuse_fck_parent_names, aes2_ick_ops);
 
 static struct clk sr2_fck;
 
@@ -2724,7 +2729,7 @@ static struct clk_hw_omap sr2_fck_hw = {
        .clkdm_name     = "wkup_clkdm",
 };
 
-DEFINE_STRUCT_CLK(sr2_fck, dpll3_ck_parent_names, aes2_ick_ops);
+DEFINE_STRUCT_CLK(sr2_fck, cpefuse_fck_parent_names, aes2_ick_ops);
 
 static struct clk sr_l4_ick;
 
index 20e120d071dd5853b228106d1164c0e251b2cf45..c2da2a0fe5ad64df80d6290f45658c697cfd4c96 100644 (file)
@@ -474,7 +474,7 @@ void omap3_noncore_dpll_disable(struct clk_hw *hw)
  */
 long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate,
                                       unsigned long *best_parent_rate,
-                                      struct clk **best_parent_clk)
+                                      struct clk_hw **best_parent_clk)
 {
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        struct dpll_data *dd;
@@ -488,10 +488,10 @@ long omap3_noncore_dpll_determine_rate(struct clk_hw *hw, unsigned long rate,
 
        if (__clk_get_rate(dd->clk_bypass) == rate &&
            (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
-               *best_parent_clk = dd->clk_bypass;
+               *best_parent_clk = __clk_get_hw(dd->clk_bypass);
        } else {
                rate = omap2_dpll_round_rate(hw, rate, best_parent_rate);
-               *best_parent_clk = dd->clk_ref;
+               *best_parent_clk = __clk_get_hw(dd->clk_ref);
        }
 
        *best_parent_rate = rate;
index 535822fcf4bbbd1e57a1211d122449d0a8a5807d..0e58e5a85d5309abf55d74a65d4ef47b28910c0f 100644 (file)
@@ -223,7 +223,7 @@ out:
  */
 long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate,
                                        unsigned long *best_parent_rate,
-                                       struct clk **best_parent_clk)
+                                       struct clk_hw **best_parent_clk)
 {
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        struct dpll_data *dd;
@@ -237,11 +237,11 @@ long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw, unsigned long rate,
 
        if (__clk_get_rate(dd->clk_bypass) == rate &&
            (dd->modes & (1 << DPLL_LOW_POWER_BYPASS))) {
-               *best_parent_clk = dd->clk_bypass;
+               *best_parent_clk = __clk_get_hw(dd->clk_bypass);
        } else {
                rate = omap4_dpll_regm4xen_round_rate(hw, rate,
                                                      best_parent_rate);
-               *best_parent_clk = dd->clk_ref;
+               *best_parent_clk = __clk_get_hw(dd->clk_ref);
        }
 
        *best_parent_rate = rate;
index dd301be89ecccfb6fe9f1d7c02a69bafcfea1559..5376d908eabedbdc496648c1fe2f4316fcd1cbe3 100644 (file)
@@ -1,6 +1,7 @@
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
 CONFIG_AUDIT=y
 CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
@@ -13,14 +14,12 @@ CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RESOURCE_COUNTERS=y
 CONFIG_MEMCG=y
 CONFIG_MEMCG_SWAP=y
 CONFIG_MEMCG_KMEM=y
 CONFIG_CGROUP_HUGETLB=y
 # CONFIG_UTS_NS is not set
 # CONFIG_IPC_NS is not set
-# CONFIG_PID_NS is not set
 # CONFIG_NET_NS is not set
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
@@ -92,7 +91,6 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_VIRTIO_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
-# CONFIG_HMC_DRV is not set
 CONFIG_SPI=y
 CONFIG_SPI_PL022=y
 CONFIG_GPIO_PL061=y
@@ -133,6 +131,8 @@ CONFIG_EXT3_FS=y
 CONFIG_EXT4_FS=y
 CONFIG_FANOTIFY=y
 CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=y
 CONFIG_VFAT_FS=y
@@ -152,14 +152,15 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
 # CONFIG_FTRACE is not set
+CONFIG_KEYS=y
 CONFIG_SECURITY=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
 CONFIG_CRYPTO_SHA2_ARM64_CE=y
 CONFIG_CRYPTO_GHASH_ARM64_CE=y
-CONFIG_CRYPTO_AES_ARM64_CE=y
 CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
 CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
 CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
index d34189bceff7760a83b6095339bd3bce7646d4ac..9ce3e680ae1c6f2b78dab11bf5d89384f5366f23 100644 (file)
@@ -52,13 +52,14 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
        dev->archdata.dma_ops = ops;
 }
 
-static inline int set_arch_dma_coherent_ops(struct device *dev)
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+                                     struct iommu_ops *iommu, bool coherent)
 {
-       dev->archdata.dma_coherent = true;
-       set_dma_ops(dev, &coherent_swiotlb_dma_ops);
-       return 0;
+       dev->archdata.dma_coherent = coherent;
+       if (coherent)
+               set_dma_ops(dev, &coherent_swiotlb_dma_ops);
 }
-#define set_arch_dma_coherent_ops      set_arch_dma_coherent_ops
+#define arch_setup_dma_ops     arch_setup_dma_ops
 
 /* do not use this function in a driver */
 static inline bool is_device_dma_coherent(struct device *dev)
index df22314f57cfda8972fff87a3071f6b82eb0ad71..210d632aa5ad38b44a6a0b49d876e0c9cab5ca6a 100644 (file)
@@ -298,7 +298,6 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
 #define pfn_pmd(pfn,prot)      (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
 #define mk_pmd(page,prot)      pfn_pmd(page_to_pfn(page),prot)
 
-#define pmd_page(pmd)           pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
 #define pud_write(pud)         pte_write(pud_pte(pud))
 #define pud_pfn(pud)           (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
 
@@ -401,7 +400,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
 }
 
-#define pud_page(pud)           pmd_page(pud_pmd(pud))
+#define pud_page(pud)          pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
 
 #endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
 
@@ -437,6 +436,8 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
        return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
 }
 
+#define pgd_page(pgd)          pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
+
 #endif  /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
 
 #define pgd_ERROR(pgd)         __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
index c45b7b1b71978c6a71b58ef623c30c4e3f652393..cee128732435c7b99fdedd3d690659f96ab81a9b 100644 (file)
@@ -99,12 +99,12 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-       return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
+       return !arch_spin_value_unlocked(READ_ONCE(*lock));
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
-       arch_spinlock_t lockval = ACCESS_ONCE(*lock);
+       arch_spinlock_t lockval = READ_ONCE(*lock);
        return (lockval.next - lockval.owner) > 1;
 }
 #define arch_spin_is_contended arch_spin_is_contended
index 3771b72b6569fbf87acefb6de0741a5af8ec6548..2d6b6065fe7f4ea7ceaf9e728066edb23dae5a0b 100644 (file)
@@ -5,6 +5,7 @@
 #include <asm/debug-monitors.h>
 #include <asm/pgtable.h>
 #include <asm/memory.h>
+#include <asm/mmu_context.h>
 #include <asm/smp_plat.h>
 #include <asm/suspend.h>
 #include <asm/tlbflush.h>
@@ -98,7 +99,18 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
         */
        ret = __cpu_suspend_enter(arg, fn);
        if (ret == 0) {
-               cpu_switch_mm(mm->pgd, mm);
+               /*
+                * We are resuming from reset with TTBR0_EL1 set to the
+                * idmap to enable the MMU; restore the active_mm mappings in
+                * TTBR0_EL1 unless the active_mm == &init_mm, in which case
+                * the thread entered __cpu_suspend with TTBR0_EL1 set to
+                * reserved TTBR0 page tables and should be restored as such.
+                */
+               if (mm == &init_mm)
+                       cpu_set_reserved_ttbr0();
+               else
+                       cpu_switch_mm(mm->pgd, mm);
+
                flush_tlb_all();
 
                /*
index b0a608da7bd13d5eff6f2e925153dbd7c47eadb0..b964c667acedfd6fbc2583a2205a2898cb70eb1c 100644 (file)
@@ -30,8 +30,7 @@
 /* Copy to userspace.  This is based on the memcpy used for
    kernel-to-kernel copying; see "string.c".  */
 
-unsigned long
-__copy_user (void __user *pdst, const void *psrc, unsigned long pn)
+unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
 {
   /* We want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
@@ -187,13 +186,14 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn)
 
   return retn;
 }
+EXPORT_SYMBOL(__copy_user);
 
 /* Copy from user to kernel, zeroing the bytes that were inaccessible in
    userland.  The return-value is the number of bytes that were
    inaccessible.  */
 
-unsigned long
-__copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn)
+unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+                                 unsigned long pn)
 {
   /* We want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
@@ -369,11 +369,10 @@ copy_exception_bytes:
 
   return retn + n;
 }
+EXPORT_SYMBOL(__copy_user_zeroing);
 
 /* Zero userspace.  */
-
-unsigned long
-__do_clear_user (void __user *pto, unsigned long pn)
+unsigned long __do_clear_user(void __user *pto, unsigned long pn)
 {
   /* We want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
@@ -521,3 +520,4 @@ __do_clear_user (void __user *pto, unsigned long pn)
 
   return retn;
 }
+EXPORT_SYMBOL(__do_clear_user);
index 15a9ed1d579cf8ca10cbc38795c7a2b775449012..4fc16b44fff26e5196729e61311464bc2c214b82 100644 (file)
@@ -108,6 +108,7 @@ config ETRAX_AXISFLASHMAP
        select MTD_JEDECPROBE
        select MTD_BLOCK
        select MTD_COMPLEX_MAPPINGS
+       select MTD_MTDRAM
        help
          This option enables MTD mapping of flash devices.  Needed to use
          flash memories.  If unsure, say Y.
@@ -358,13 +359,6 @@ config ETRAX_SPI_MMC
        default MMC
        select SPI
        select MMC_SPI
-       select ETRAX_SPI_MMC_BOARD
-
-# For the parts that can't be a module (due to restrictions in
-# framework elsewhere).
-config ETRAX_SPI_MMC_BOARD
-       boolean
-       default n
 
 # While the board info is MMC_SPI only, the drivers are written to be
 # independent of MMC_SPI, so we'll keep SPI non-dependent on the
index 39aa3c117a86fd83a7bc4d4059e16e57edd06b16..15fbfefced2c43f6998198162a069aa7070073c1 100644 (file)
@@ -10,4 +10,3 @@ obj-$(CONFIG_ETRAX_IOP_FW_LOAD)         += iop_fw_load.o
 obj-$(CONFIG_ETRAX_I2C)                        += i2c.o
 obj-$(CONFIG_ETRAX_SYNCHRONOUS_SERIAL) += sync_serial.o
 obj-$(CONFIG_PCI)                      += pci/
-obj-$(CONFIG_ETRAX_SPI_MMC_BOARD)      += board_mmcspi.o
index c073cf4ba016282d11ea5ee7631bf54b288bb5a6..d9cc856f89fb387e495983d018670504270a8cba 100644 (file)
@@ -2,7 +2,6 @@
 #include <linux/init.h>
 
 /* High level I2C actions */
-int __init i2c_init(void);
 int i2c_write(unsigned char theSlave, void *data, size_t nbytes);
 int i2c_read(unsigned char theSlave, void *data, size_t nbytes);
 int i2c_writereg(unsigned char theSlave, unsigned char theReg, unsigned char theValue);
index 5a149134cfb58267bfee6e38121ed4fe7b9de76c..08a313fc22418c326987dc36c9c9203de639e270 100644 (file)
@@ -1,8 +1,7 @@
 /*
- * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
- *
- * Copyright (c) 2005 Axis Communications AB
+ * Simple synchronous serial port driver for ETRAX FS and ARTPEC-3.
  *
+ * Copyright (c) 2005, 2008 Axis Communications AB
  * Author: Mikael Starvik
  *
  */
 #include <linux/mutex.h>
 #include <linux/interrupt.h>
 #include <linux/poll.h>
-#include <linux/init.h>
-#include <linux/timer.h>
-#include <linux/spinlock.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
 #include <linux/wait.h>
 
 #include <asm/io.h>
-#include <dma.h>
+#include <mach/dma.h>
 #include <pinmux.h>
 #include <hwregs/reg_rdwr.h>
 #include <hwregs/sser_defs.h>
+#include <hwregs/timer_defs.h>
 #include <hwregs/dma_defs.h>
 #include <hwregs/dma.h>
 #include <hwregs/intr_vect_defs.h>
 /* the rest of the data pointed out by Descr1 and set readp to the start */
 /* of Descr2                                                             */
 
-#define SYNC_SERIAL_MAJOR 125
-
 /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
 /* words can be handled */
-#define IN_BUFFER_SIZE 12288
-#define IN_DESCR_SIZE 256
-#define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
+#define IN_DESCR_SIZE SSP_INPUT_CHUNK_SIZE
+#define NBR_IN_DESCR (8*6)
+#define IN_BUFFER_SIZE (IN_DESCR_SIZE * NBR_IN_DESCR)
 
-#define OUT_BUFFER_SIZE 1024*8
 #define NBR_OUT_DESCR 8
+#define OUT_BUFFER_SIZE (1024 * NBR_OUT_DESCR)
 
 #define DEFAULT_FRAME_RATE 0
 #define DEFAULT_WORD_RATE 7
 
+/* To be removed when we move to pure udev. */
+#define SYNC_SERIAL_MAJOR 125
+
 /* NOTE: Enabling some debug will likely cause overrun or underrun,
- * especially if manual mode is use.
+ * especially if manual mode is used.
  */
 #define DEBUG(x)
 #define DEBUGREAD(x)
 #define DEBUGTRDMA(x)
 #define DEBUGOUTBUF(x)
 
-typedef struct sync_port
-{
-       reg_scope_instances regi_sser;
-       reg_scope_instances regi_dmain;
-       reg_scope_instances regi_dmaout;
+enum syncser_irq_setup {
+       no_irq_setup = 0,
+       dma_irq_setup = 1,
+       manual_irq_setup = 2,
+};
+
+struct sync_port {
+       unsigned long regi_sser;
+       unsigned long regi_dmain;
+       unsigned long regi_dmaout;
+
+       /* Interrupt vectors. */
+       unsigned long dma_in_intr_vect; /* Used for DMA in. */
+       unsigned long dma_out_intr_vect; /* Used for DMA out. */
+       unsigned long syncser_intr_vect; /* Used when no DMA. */
+
+       /* DMA number for in and out. */
+       unsigned int dma_in_nbr;
+       unsigned int dma_out_nbr;
+
+       /* DMA owner. */
+       enum dma_owner req_dma;
 
        char started; /* 1 if port has been started */
        char port_nbr; /* Port 0 or 1 */
@@ -99,22 +117,29 @@ typedef struct sync_port
        char use_dma;  /* 1 if port uses dma */
        char tr_running;
 
-       char init_irqs;
+       enum syncser_irq_setup init_irqs;
        int output;
        int input;
 
        /* Next byte to be read by application */
-       volatile unsigned char *volatile readp;
+       unsigned char *readp;
        /* Next byte to be written by etrax */
-       volatile unsigned char *volatile writep;
+       unsigned char *writep;
 
        unsigned int in_buffer_size;
+       unsigned int in_buffer_len;
        unsigned int inbufchunk;
-       unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
-       unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32)));
-       unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
-       struct dma_descr_data* next_rx_desc;
-       struct dma_descr_data* prev_rx_desc;
+       /* Data buffers for in and output. */
+       unsigned char out_buffer[OUT_BUFFER_SIZE] __aligned(32);
+       unsigned char in_buffer[IN_BUFFER_SIZE] __aligned(32);
+       unsigned char flip[IN_BUFFER_SIZE] __aligned(32);
+       struct timespec timestamp[NBR_IN_DESCR];
+       struct dma_descr_data *next_rx_desc;
+       struct dma_descr_data *prev_rx_desc;
+
+       struct timeval last_timestamp;
+       int read_ts_idx;
+       int write_ts_idx;
 
        /* Pointer to the first available descriptor in the ring,
         * unless active_tr_descr == catch_tr_descr and a dma
@@ -135,114 +160,138 @@ typedef struct sync_port
        /* Number of bytes currently locked for being read by DMA */
        int out_buf_count;
 
-       dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
-       dma_descr_context in_context __attribute__ ((__aligned__(32)));
-       dma_descr_data out_descr[NBR_OUT_DESCR]
-               __attribute__ ((__aligned__(16)));
-       dma_descr_context out_context __attribute__ ((__aligned__(32)));
+       dma_descr_context in_context __aligned(32);
+       dma_descr_context out_context __aligned(32);
+       dma_descr_data in_descr[NBR_IN_DESCR] __aligned(16);
+       dma_descr_data out_descr[NBR_OUT_DESCR] __aligned(16);
+
        wait_queue_head_t out_wait_q;
        wait_queue_head_t in_wait_q;
 
        spinlock_t lock;
-} sync_port;
+};
 
 static DEFINE_MUTEX(sync_serial_mutex);
 static int etrax_sync_serial_init(void);
 static void initialize_port(int portnbr);
 static inline int sync_data_avail(struct sync_port *port);
 
-static int sync_serial_open(struct inode *, struct file*);
-static int sync_serial_release(struct inode*, struct file*);
+static int sync_serial_open(struct inode *, struct file *);
+static int sync_serial_release(struct inode *, struct file *);
 static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
 
-static int sync_serial_ioctl(struct file *,
-                            unsigned int cmd, unsigned long arg);
-static ssize_t sync_serial_write(struct file * file, const char * buf,
+static long sync_serial_ioctl(struct file *file,
+                             unsigned int cmd, unsigned long arg);
+static int sync_serial_ioctl_unlocked(struct file *file,
+                                     unsigned int cmd, unsigned long arg);
+static ssize_t sync_serial_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos);
-static ssize_t sync_serial_read(struct file *file, char *buf,
+static ssize_t sync_serial_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos);
 
-#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
-     defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
-    (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
-     defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
+#if ((defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
+       defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
+       (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
+       defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)))
 #define SYNC_SER_DMA
+#else
+#define SYNC_SER_MANUAL
 #endif
 
-static void send_word(sync_port* port);
-static void start_dma_out(struct sync_port *port, const char *data, int count);
-static void start_dma_in(sync_port* port);
 #ifdef SYNC_SER_DMA
+static void start_dma_out(struct sync_port *port, const char *data, int count);
+static void start_dma_in(struct sync_port *port);
 static irqreturn_t tr_interrupt(int irq, void *dev_id);
 static irqreturn_t rx_interrupt(int irq, void *dev_id);
 #endif
-
-#if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
-     !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
-    (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
-     !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
-#define SYNC_SER_MANUAL
-#endif
 #ifdef SYNC_SER_MANUAL
+static void send_word(struct sync_port *port);
 static irqreturn_t manual_interrupt(int irq, void *dev_id);
 #endif
 
-#ifdef CONFIG_ETRAXFS  /* ETRAX FS */
-#define OUT_DMA_NBR 4
-#define IN_DMA_NBR 5
-#define PINMUX_SSER pinmux_sser0
-#define SYNCSER_INST regi_sser0
-#define SYNCSER_INTR_VECT SSER0_INTR_VECT
-#define OUT_DMA_INST regi_dma4
-#define IN_DMA_INST regi_dma5
-#define DMA_OUT_INTR_VECT DMA4_INTR_VECT
-#define DMA_IN_INTR_VECT DMA5_INTR_VECT
-#define REQ_DMA_SYNCSER dma_sser0
-#else                  /* Artpec-3 */
-#define OUT_DMA_NBR 6
-#define IN_DMA_NBR 7
-#define PINMUX_SSER pinmux_sser
-#define SYNCSER_INST regi_sser
-#define SYNCSER_INTR_VECT SSER_INTR_VECT
-#define OUT_DMA_INST regi_dma6
-#define IN_DMA_INST regi_dma7
-#define DMA_OUT_INTR_VECT DMA6_INTR_VECT
-#define DMA_IN_INTR_VECT DMA7_INTR_VECT
-#define REQ_DMA_SYNCSER dma_sser
+#define artpec_pinmux_alloc_fixed crisv32_pinmux_alloc_fixed
+#define artpec_request_dma crisv32_request_dma
+#define artpec_free_dma crisv32_free_dma
+
+#ifdef CONFIG_ETRAXFS
+/* ETRAX FS */
+#define DMA_OUT_NBR0           SYNC_SER0_TX_DMA_NBR
+#define DMA_IN_NBR0            SYNC_SER0_RX_DMA_NBR
+#define DMA_OUT_NBR1           SYNC_SER1_TX_DMA_NBR
+#define DMA_IN_NBR1            SYNC_SER1_RX_DMA_NBR
+#define PINMUX_SSER0           pinmux_sser0
+#define PINMUX_SSER1           pinmux_sser1
+#define SYNCSER_INST0          regi_sser0
+#define SYNCSER_INST1          regi_sser1
+#define SYNCSER_INTR_VECT0     SSER0_INTR_VECT
+#define SYNCSER_INTR_VECT1     SSER1_INTR_VECT
+#define OUT_DMA_INST0          regi_dma4
+#define IN_DMA_INST0           regi_dma5
+#define DMA_OUT_INTR_VECT0     DMA4_INTR_VECT
+#define DMA_OUT_INTR_VECT1     DMA7_INTR_VECT
+#define DMA_IN_INTR_VECT0      DMA5_INTR_VECT
+#define DMA_IN_INTR_VECT1      DMA6_INTR_VECT
+#define REQ_DMA_SYNCSER0       dma_sser0
+#define REQ_DMA_SYNCSER1       dma_sser1
+#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
+#define PORT1_DMA 1
+#else
+#define PORT1_DMA 0
+#endif
+#elif defined(CONFIG_CRIS_MACH_ARTPEC3)
+/* ARTPEC-3 */
+#define DMA_OUT_NBR0           SYNC_SER_TX_DMA_NBR
+#define DMA_IN_NBR0            SYNC_SER_RX_DMA_NBR
+#define PINMUX_SSER0           pinmux_sser
+#define SYNCSER_INST0          regi_sser
+#define SYNCSER_INTR_VECT0     SSER_INTR_VECT
+#define OUT_DMA_INST0          regi_dma6
+#define IN_DMA_INST0           regi_dma7
+#define DMA_OUT_INTR_VECT0     DMA6_INTR_VECT
+#define DMA_IN_INTR_VECT0      DMA7_INTR_VECT
+#define REQ_DMA_SYNCSER0       dma_sser
+#define REQ_DMA_SYNCSER1       dma_sser
 #endif
 
-/* The ports */
-static struct sync_port ports[]=
-{
-       {
-               .regi_sser             = SYNCSER_INST,
-               .regi_dmaout           = OUT_DMA_INST,
-               .regi_dmain            = IN_DMA_INST,
 #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
-                .use_dma               = 1,
+#define PORT0_DMA 1
 #else
-                .use_dma               = 0,
+#define PORT0_DMA 0
 #endif
-       }
-#ifdef CONFIG_ETRAXFS
-       ,
 
+/* The ports */
+static struct sync_port ports[] = {
        {
-               .regi_sser             = regi_sser1,
-               .regi_dmaout           = regi_dma6,
-               .regi_dmain            = regi_dma7,
-#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
-                .use_dma               = 1,
-#else
-                .use_dma               = 0,
-#endif
-       }
+               .regi_sser              = SYNCSER_INST0,
+               .regi_dmaout            = OUT_DMA_INST0,
+               .regi_dmain             = IN_DMA_INST0,
+               .use_dma                = PORT0_DMA,
+               .dma_in_intr_vect       = DMA_IN_INTR_VECT0,
+               .dma_out_intr_vect      = DMA_OUT_INTR_VECT0,
+               .dma_in_nbr             = DMA_IN_NBR0,
+               .dma_out_nbr            = DMA_OUT_NBR0,
+               .req_dma                = REQ_DMA_SYNCSER0,
+               .syncser_intr_vect      = SYNCSER_INTR_VECT0,
+       },
+#ifdef CONFIG_ETRAXFS
+       {
+               .regi_sser              = SYNCSER_INST1,
+               .regi_dmaout            = regi_dma6,
+               .regi_dmain             = regi_dma7,
+               .use_dma                = PORT1_DMA,
+               .dma_in_intr_vect       = DMA_IN_INTR_VECT1,
+               .dma_out_intr_vect      = DMA_OUT_INTR_VECT1,
+               .dma_in_nbr             = DMA_IN_NBR1,
+               .dma_out_nbr            = DMA_OUT_NBR1,
+               .req_dma                = REQ_DMA_SYNCSER1,
+               .syncser_intr_vect      = SYNCSER_INTR_VECT1,
+       },
 #endif
 };
 
 #define NBR_PORTS ARRAY_SIZE(ports)
 
-static const struct file_operations sync_serial_fops = {
+static const struct file_operations syncser_fops = {
        .owner          = THIS_MODULE,
        .write          = sync_serial_write,
        .read           = sync_serial_read,
@@ -253,61 +302,40 @@ static const struct file_operations sync_serial_fops = {
        .llseek         = noop_llseek,
 };
 
-static int __init etrax_sync_serial_init(void)
-{
-       ports[0].enabled = 0;
-#ifdef CONFIG_ETRAXFS
-       ports[1].enabled = 0;
-#endif
-       if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
-                       &sync_serial_fops) < 0) {
-               printk(KERN_WARNING
-                       "Unable to get major for synchronous serial port\n");
-               return -EBUSY;
-       }
-
-       /* Initialize Ports */
-#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
-       if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
-               printk(KERN_WARNING
-                       "Unable to alloc pins for synchronous serial port 0\n");
-               return -EIO;
-       }
-       ports[0].enabled = 1;
-       initialize_port(0);
-#endif
-
-#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
-       if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
-               printk(KERN_WARNING
-                       "Unable to alloc pins for synchronous serial port 0\n");
-               return -EIO;
-       }
-       ports[1].enabled = 1;
-       initialize_port(1);
-#endif
+static dev_t syncser_first;
+static int minor_count = NBR_PORTS;
+#define SYNCSER_NAME "syncser"
+static struct cdev *syncser_cdev;
+static struct class *syncser_class;
 
-#ifdef CONFIG_ETRAXFS
-       printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
-#else
-       printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
-#endif
-       return 0;
+static void sync_serial_start_port(struct sync_port *port)
+{
+       reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
+       reg_sser_rw_tr_cfg tr_cfg =
+               REG_RD(sser, port->regi_sser, rw_tr_cfg);
+       reg_sser_rw_rec_cfg rec_cfg =
+               REG_RD(sser, port->regi_sser, rw_rec_cfg);
+       cfg.en = regk_sser_yes;
+       tr_cfg.tr_en = regk_sser_yes;
+       rec_cfg.rec_en = regk_sser_yes;
+       REG_WR(sser, port->regi_sser, rw_cfg, cfg);
+       REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
+       REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
+       port->started = 1;
 }
 
 static void __init initialize_port(int portnbr)
 {
-       int __attribute__((unused)) i;
        struct sync_port *port = &ports[portnbr];
-       reg_sser_rw_cfg cfg = {0};
-       reg_sser_rw_frm_cfg frm_cfg = {0};
-       reg_sser_rw_tr_cfg tr_cfg = {0};
-       reg_sser_rw_rec_cfg rec_cfg = {0};
+       reg_sser_rw_cfg cfg = { 0 };
+       reg_sser_rw_frm_cfg frm_cfg = { 0 };
+       reg_sser_rw_tr_cfg tr_cfg = { 0 };
+       reg_sser_rw_rec_cfg rec_cfg = { 0 };
 
-       DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));
+       DEBUG(pr_info("Init sync serial port %d\n", portnbr));
 
        port->port_nbr = portnbr;
-       port->init_irqs = 1;
+       port->init_irqs = no_irq_setup;
 
        port->out_rd_ptr = port->out_buffer;
        port->out_buf_count = 0;
@@ -318,10 +346,11 @@ static void __init initialize_port(int portnbr)
        port->readp = port->flip;
        port->writep = port->flip;
        port->in_buffer_size = IN_BUFFER_SIZE;
+       port->in_buffer_len = 0;
        port->inbufchunk = IN_DESCR_SIZE;
-       port->next_rx_desc = &port->in_descr[0];
-       port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
-       port->prev_rx_desc->eol = 1;
+
+       port->read_ts_idx = 0;
+       port->write_ts_idx = 0;
 
        init_waitqueue_head(&port->out_wait_q);
        init_waitqueue_head(&port->in_wait_q);
@@ -368,14 +397,18 @@ static void __init initialize_port(int portnbr)
        REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
 
 #ifdef SYNC_SER_DMA
-       /* Setup the descriptor ring for dma out/transmit. */
-       for (i = 0; i < NBR_OUT_DESCR; i++) {
-               port->out_descr[i].wait = 0;
-               port->out_descr[i].intr = 1;
-               port->out_descr[i].eol = 0;
-               port->out_descr[i].out_eop = 0;
-               port->out_descr[i].next =
-                       (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
+       {
+               int i;
+               /* Setup the descriptor ring for dma out/transmit. */
+               for (i = 0; i < NBR_OUT_DESCR; i++) {
+                       dma_descr_data *descr = &port->out_descr[i];
+                       descr->wait = 0;
+                       descr->intr = 1;
+                       descr->eol = 0;
+                       descr->out_eop = 0;
+                       descr->next =
+                               (dma_descr_data *)virt_to_phys(&descr[i+1]);
+               }
        }
 
        /* Create a ring from the list. */
@@ -391,201 +424,116 @@ static void __init initialize_port(int portnbr)
 
 static inline int sync_data_avail(struct sync_port *port)
 {
-       int avail;
-       unsigned char *start;
-       unsigned char *end;
-
-       start = (unsigned char*)port->readp; /* cast away volatile */
-       end = (unsigned char*)port->writep;  /* cast away volatile */
-       /* 0123456789  0123456789
-        *  -----      -    -----
-        *  ^rp  ^wp    ^wp ^rp
-        */
-
-       if (end >= start)
-               avail = end - start;
-       else
-               avail = port->in_buffer_size - (start - end);
-       return avail;
-}
-
-static inline int sync_data_avail_to_end(struct sync_port *port)
-{
-       int avail;
-       unsigned char *start;
-       unsigned char *end;
-
-       start = (unsigned char*)port->readp; /* cast away volatile */
-       end = (unsigned char*)port->writep;  /* cast away volatile */
-       /* 0123456789  0123456789
-        *  -----           -----
-        *  ^rp  ^wp    ^wp ^rp
-        */
-
-       if (end >= start)
-               avail = end - start;
-       else
-               avail = port->flip + port->in_buffer_size - start;
-       return avail;
+       return port->in_buffer_len;
 }
 
 static int sync_serial_open(struct inode *inode, struct file *file)
 {
+       int ret = 0;
        int dev = iminor(inode);
-       int ret = -EBUSY;
-       sync_port *port;
-       reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
-       reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
+       struct sync_port *port;
+#ifdef SYNC_SER_DMA
+       reg_dma_rw_cfg cfg = { .en = regk_dma_yes };
+       reg_dma_rw_intr_mask intr_mask = { .data = regk_dma_yes };
+#endif
 
-       mutex_lock(&sync_serial_mutex);
-       DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
+       DEBUG(pr_debug("Open sync serial port %d\n", dev));
 
-       if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
-       {
-               DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
-               ret = -ENODEV;
-               goto out;
+       if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+               DEBUG(pr_info("Invalid minor %d\n", dev));
+               return -ENODEV;
        }
        port = &ports[dev];
        /* Allow open this device twice (assuming one reader and one writer) */
-       if (port->busy == 2)
-       {
-               DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
-               goto out;
+       if (port->busy == 2) {
+               DEBUG(pr_info("syncser%d is busy\n", dev));
+               return -EBUSY;
        }
 
+       mutex_lock(&sync_serial_mutex);
 
-       if (port->init_irqs) {
-               if (port->use_dma) {
-                       if (port == &ports[0]) {
-#ifdef SYNC_SER_DMA
-                               if (request_irq(DMA_OUT_INTR_VECT,
-                                               tr_interrupt,
-                                               0,
-                                               "synchronous serial 0 dma tr",
-                                               &ports[0])) {
-                                       printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
-                                       goto out;
-                               } else if (request_irq(DMA_IN_INTR_VECT,
-                                               rx_interrupt,
-                                               0,
-                                               "synchronous serial 1 dma rx",
-                                               &ports[0])) {
-                                       free_irq(DMA_OUT_INTR_VECT, &port[0]);
-                                       printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
-                                       goto out;
-                               } else if (crisv32_request_dma(OUT_DMA_NBR,
-                                               "synchronous serial 0 dma tr",
-                                               DMA_VERBOSE_ON_ERROR,
-                                               0,
-                                               REQ_DMA_SYNCSER)) {
-                                       free_irq(DMA_OUT_INTR_VECT, &port[0]);
-                                       free_irq(DMA_IN_INTR_VECT, &port[0]);
-                                       printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
-                                       goto out;
-                               } else if (crisv32_request_dma(IN_DMA_NBR,
-                                               "synchronous serial 0 dma rec",
-                                               DMA_VERBOSE_ON_ERROR,
-                                               0,
-                                               REQ_DMA_SYNCSER)) {
-                                       crisv32_free_dma(OUT_DMA_NBR);
-                                       free_irq(DMA_OUT_INTR_VECT, &port[0]);
-                                       free_irq(DMA_IN_INTR_VECT, &port[0]);
-                                       printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
-                                       goto out;
-                               }
-#endif
-                       }
-#ifdef CONFIG_ETRAXFS
-                       else if (port == &ports[1]) {
+       /* Clear any stale date left in the flip buffer */
+       port->readp = port->writep = port->flip;
+       port->in_buffer_len = 0;
+       port->read_ts_idx = 0;
+       port->write_ts_idx = 0;
+
+       if (port->init_irqs != no_irq_setup) {
+               /* Init only on first call. */
+               port->busy++;
+               mutex_unlock(&sync_serial_mutex);
+               return 0;
+       }
+       if (port->use_dma) {
 #ifdef SYNC_SER_DMA
-                               if (request_irq(DMA6_INTR_VECT,
-                                               tr_interrupt,
-                                               0,
-                                               "synchronous serial 1 dma tr",
-                                               &ports[1])) {
-                                       printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
-                                       goto out;
-                               } else if (request_irq(DMA7_INTR_VECT,
-                                                      rx_interrupt,
-                                                      0,
-                                                      "synchronous serial 1 dma rx",
-                                                      &ports[1])) {
-                                       free_irq(DMA6_INTR_VECT, &ports[1]);
-                                       printk(KERN_CRIT "Can't allocate sync serial port 3 IRQ");
-                                       goto out;
-                               } else if (crisv32_request_dma(
-                                               SYNC_SER1_TX_DMA_NBR,
-                                               "synchronous serial 1 dma tr",
-                                               DMA_VERBOSE_ON_ERROR,
-                                               0,
-                                               dma_sser1)) {
-                                       free_irq(DMA6_INTR_VECT, &ports[1]);
-                                       free_irq(DMA7_INTR_VECT, &ports[1]);
-                                       printk(KERN_CRIT "Can't allocate sync serial port 3 TX DMA channel");
-                                       goto out;
-                               } else if (crisv32_request_dma(
-                                               SYNC_SER1_RX_DMA_NBR,
-                                               "synchronous serial 3 dma rec",
-                                               DMA_VERBOSE_ON_ERROR,
-                                               0,
-                                               dma_sser1)) {
-                                       crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
-                                       free_irq(DMA6_INTR_VECT, &ports[1]);
-                                       free_irq(DMA7_INTR_VECT, &ports[1]);
-                                       printk(KERN_CRIT "Can't allocate sync serial port 3 RX DMA channel");
-                                       goto out;
-                               }
-#endif
-                       }
+               const char *tmp;
+               DEBUG(pr_info("Using DMA for syncser%d\n", dev));
+
+               tmp = dev == 0 ? "syncser0 tx" : "syncser1 tx";
+               if (request_irq(port->dma_out_intr_vect, tr_interrupt, 0,
+                               tmp, port)) {
+                       pr_err("Can't alloc syncser%d TX IRQ", dev);
+                       ret = -EBUSY;
+                       goto unlock_and_exit;
+               }
+               if (artpec_request_dma(port->dma_out_nbr, tmp,
+                               DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
+                       free_irq(port->dma_out_intr_vect, port);
+                       pr_err("Can't alloc syncser%d TX DMA", dev);
+                       ret = -EBUSY;
+                       goto unlock_and_exit;
+               }
+               tmp = dev == 0 ? "syncser0 rx" : "syncser1 rx";
+               if (request_irq(port->dma_in_intr_vect, rx_interrupt, 0,
+                               tmp, port)) {
+                       artpec_free_dma(port->dma_out_nbr);
+                       free_irq(port->dma_out_intr_vect, port);
+                       pr_err("Can't alloc syncser%d RX IRQ", dev);
+                       ret = -EBUSY;
+                       goto unlock_and_exit;
+               }
+               if (artpec_request_dma(port->dma_in_nbr, tmp,
+                               DMA_VERBOSE_ON_ERROR, 0, port->req_dma)) {
+                       artpec_free_dma(port->dma_out_nbr);
+                       free_irq(port->dma_out_intr_vect, port);
+                       free_irq(port->dma_in_intr_vect, port);
+                       pr_err("Can't alloc syncser%d RX DMA", dev);
+                       ret = -EBUSY;
+                       goto unlock_and_exit;
+               }
+               /* Enable DMAs */
+               REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
+               REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
+               /* Enable DMA IRQs */
+               REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
+               REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
+               /* Set up wordsize = 1 for DMAs. */
+               DMA_WR_CMD(port->regi_dmain, regk_dma_set_w_size1);
+               DMA_WR_CMD(port->regi_dmaout, regk_dma_set_w_size1);
+
+               start_dma_in(port);
+               port->init_irqs = dma_irq_setup;
 #endif
-                        /* Enable DMAs */
-                       REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
-                       REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
-                       /* Enable DMA IRQs */
-                       REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
-                       REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
-                       /* Set up wordsize = 1 for DMAs. */
-                       DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
-                       DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
-
-                       start_dma_in(port);
-                       port->init_irqs = 0;
-               } else { /* !port->use_dma */
+       } else { /* !port->use_dma */
 #ifdef SYNC_SER_MANUAL
-                       if (port == &ports[0]) {
-                               if (request_irq(SYNCSER_INTR_VECT,
-                                               manual_interrupt,
-                                               0,
-                                               "synchronous serial manual irq",
-                                               &ports[0])) {
-                                       printk("Can't allocate sync serial manual irq");
-                                       goto out;
-                               }
-                       }
-#ifdef CONFIG_ETRAXFS
-                       else if (port == &ports[1]) {
-                               if (request_irq(SSER1_INTR_VECT,
-                                               manual_interrupt,
-                                               0,
-                                               "synchronous serial manual irq",
-                                               &ports[1])) {
-                                       printk(KERN_CRIT "Can't allocate sync serial manual irq");
-                                       goto out;
-                               }
-                       }
-#endif
-                       port->init_irqs = 0;
+               const char *tmp = dev == 0 ? "syncser0 manual irq" :
+                                            "syncser1 manual irq";
+               if (request_irq(port->syncser_intr_vect, manual_interrupt,
+                               0, tmp, port)) {
+                       pr_err("Can't alloc syncser%d manual irq",
+                               dev);
+                       ret = -EBUSY;
+                       goto unlock_and_exit;
+               }
+               port->init_irqs = manual_irq_setup;
 #else
-                       panic("sync_serial: Manual mode not supported.\n");
+               panic("sync_serial: Manual mode not supported\n");
 #endif /* SYNC_SER_MANUAL */
-               }
-
-       } /* port->init_irqs */
-
+       }
        port->busy++;
        ret = 0;
-out:
+
+unlock_and_exit:
        mutex_unlock(&sync_serial_mutex);
        return ret;
 }
@@ -593,18 +541,17 @@ out:
 static int sync_serial_release(struct inode *inode, struct file *file)
 {
        int dev = iminor(inode);
-       sync_port *port;
+       struct sync_port *port;
 
-       if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
-       {
-               DEBUG(printk("Invalid minor %d\n", dev));
+       if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+               DEBUG(pr_info("Invalid minor %d\n", dev));
                return -ENODEV;
        }
        port = &ports[dev];
        if (port->busy)
                port->busy--;
        if (!port->busy)
-          /* XXX */ ;
+               /* XXX */;
        return 0;
 }
 
@@ -612,21 +559,15 @@ static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
 {
        int dev = iminor(file_inode(file));
        unsigned int mask = 0;
-       sync_port *port;
-       DEBUGPOLL( static unsigned int prev_mask = 0; );
+       struct sync_port *port;
+       DEBUGPOLL(
+       static unsigned int prev_mask;
+       );
 
        port = &ports[dev];
 
-       if (!port->started) {
-               reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
-               reg_sser_rw_rec_cfg rec_cfg =
-                       REG_RD(sser, port->regi_sser, rw_rec_cfg);
-               cfg.en = regk_sser_yes;
-               rec_cfg.rec_en = port->input;
-               REG_WR(sser, port->regi_sser, rw_cfg, cfg);
-               REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
-               port->started = 1;
-       }
+       if (!port->started)
+               sync_serial_start_port(port);
 
        poll_wait(file, &port->out_wait_q, wait);
        poll_wait(file, &port->in_wait_q, wait);
@@ -645,33 +586,175 @@ static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
        if (port->input && sync_data_avail(port) >= port->inbufchunk)
                mask |= POLLIN | POLLRDNORM;
 
-       DEBUGPOLL(if (mask != prev_mask)
-             printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
-                    mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":"");
-             prev_mask = mask;
-             );
+       DEBUGPOLL(
+       if (mask != prev_mask)
+               pr_info("sync_serial_poll: mask 0x%08X %s %s\n",
+                       mask,
+                       mask & POLLOUT ? "POLLOUT" : "",
+                       mask & POLLIN ? "POLLIN" : "");
+               prev_mask = mask;
+       );
        return mask;
 }
 
-static int sync_serial_ioctl(struct file *file,
-                 unsigned int cmd, unsigned long arg)
+static ssize_t __sync_serial_read(struct file *file,
+                                 char __user *buf,
+                                 size_t count,
+                                 loff_t *ppos,
+                                 struct timespec *ts)
+{
+       unsigned long flags;
+       int dev = iminor(file_inode(file));
+       int avail;
+       struct sync_port *port;
+       unsigned char *start;
+       unsigned char *end;
+
+       if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+               DEBUG(pr_info("Invalid minor %d\n", dev));
+               return -ENODEV;
+       }
+       port = &ports[dev];
+
+       if (!port->started)
+               sync_serial_start_port(port);
+
+       /* Calculate number of available bytes */
+       /* Save pointers to avoid that they are modified by interrupt */
+       spin_lock_irqsave(&port->lock, flags);
+       start = port->readp;
+       end = port->writep;
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       while ((start == end) && !port->in_buffer_len) {
+               if (file->f_flags & O_NONBLOCK)
+                       return -EAGAIN;
+
+               wait_event_interruptible(port->in_wait_q,
+                                        !(start == end && !port->full));
+
+               if (signal_pending(current))
+                       return -EINTR;
+
+               spin_lock_irqsave(&port->lock, flags);
+               start = port->readp;
+               end = port->writep;
+               spin_unlock_irqrestore(&port->lock, flags);
+       }
+
+       DEBUGREAD(pr_info("R%d c %zu ri %td wi %td /%u\n",
+                         dev, count,
+                         start - port->flip, end - port->flip,
+                         port->in_buffer_size));
+
+       /* Lazy read, never return wrapped data. */
+       if (end > start)
+               avail = end - start;
+       else
+               avail = port->flip + port->in_buffer_size - start;
+
+       count = count > avail ? avail : count;
+       if (copy_to_user(buf, start, count))
+               return -EFAULT;
+
+       /* If timestamp requested, find timestamp of first returned byte
+        * and copy it.
+        * N.B: Applications that request timstamps MUST read data in
+        * chunks that are multiples of IN_DESCR_SIZE.
+        * Otherwise the timestamps will not be aligned to the data read.
+        */
+       if (ts != NULL) {
+               int idx = port->read_ts_idx;
+               memcpy(ts, &port->timestamp[idx], sizeof(struct timespec));
+               port->read_ts_idx += count / IN_DESCR_SIZE;
+               if (port->read_ts_idx >= NBR_IN_DESCR)
+                       port->read_ts_idx = 0;
+       }
+
+       spin_lock_irqsave(&port->lock, flags);
+       port->readp += count;
+       /* Check for wrap */
+       if (port->readp >= port->flip + port->in_buffer_size)
+               port->readp = port->flip;
+       port->in_buffer_len -= count;
+       port->full = 0;
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       DEBUGREAD(pr_info("r %zu\n", count));
+
+       return count;
+}
+
+static ssize_t sync_serial_input(struct file *file, unsigned long arg)
+{
+       struct ssp_request req;
+       int count;
+       int ret;
+
+       /* Copy the request structure from user-mode. */
+       ret = copy_from_user(&req, (struct ssp_request __user *)arg,
+               sizeof(struct ssp_request));
+
+       if (ret) {
+               DEBUG(pr_info("sync_serial_input copy from user failed\n"));
+               return -EFAULT;
+       }
+
+       /* To get the timestamps aligned, make sure that 'len'
+        * is a multiple of IN_DESCR_SIZE.
+        */
+       if ((req.len % IN_DESCR_SIZE) != 0) {
+               DEBUG(pr_info("sync_serial: req.len %x, IN_DESCR_SIZE %x\n",
+                             req.len, IN_DESCR_SIZE));
+               return -EFAULT;
+       }
+
+       /* Do the actual read. */
+       /* Note that req.buf is actually a pointer to user space. */
+       count = __sync_serial_read(file, req.buf, req.len,
+                                  NULL, &req.ts);
+
+       if (count < 0) {
+               DEBUG(pr_info("sync_serial_input read failed\n"));
+               return count;
+       }
+
+       /* Copy the request back to user-mode. */
+       ret = copy_to_user((struct ssp_request __user *)arg, &req,
+               sizeof(struct ssp_request));
+
+       if (ret) {
+               DEBUG(pr_info("syncser input copy2user failed\n"));
+               return -EFAULT;
+       }
+
+       /* Return the number of bytes read. */
+       return count;
+}
+
+
+static int sync_serial_ioctl_unlocked(struct file *file,
+                                     unsigned int cmd, unsigned long arg)
 {
        int return_val = 0;
        int dma_w_size = regk_dma_set_w_size1;
        int dev = iminor(file_inode(file));
-       sync_port *port;
+       struct sync_port *port;
        reg_sser_rw_tr_cfg tr_cfg;
        reg_sser_rw_rec_cfg rec_cfg;
        reg_sser_rw_frm_cfg frm_cfg;
        reg_sser_rw_cfg gen_cfg;
        reg_sser_rw_intr_mask intr_mask;
 
-       if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
-       {
-               DEBUG(printk("Invalid minor %d\n", dev));
+       if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
+               DEBUG(pr_info("Invalid minor %d\n", dev));
                return -1;
        }
-        port = &ports[dev];
+
+       if (cmd == SSP_INPUT)
+               return sync_serial_input(file, arg);
+
+       port = &ports[dev];
        spin_lock_irq(&port->lock);
 
        tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
@@ -680,11 +763,9 @@ static int sync_serial_ioctl(struct file *file,
        gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
        intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
 
-       switch(cmd)
-       {
+       switch (cmd) {
        case SSP_SPEED:
-               if (GET_SPEED(arg) == CODEC)
-               {
+               if (GET_SPEED(arg) == CODEC) {
                        unsigned int freq;
 
                        gen_cfg.base_freq = regk_sser_f32;
@@ -701,15 +782,25 @@ static int sync_serial_ioctl(struct file *file,
                        case FREQ_256kHz:
                                gen_cfg.clk_div = 125 *
                                        (1 << (freq - FREQ_256kHz)) - 1;
-                       break;
+                               break;
                        case FREQ_512kHz:
                                gen_cfg.clk_div = 62;
-                       break;
+                               break;
                        case FREQ_1MHz:
                        case FREQ_2MHz:
                        case FREQ_4MHz:
                                gen_cfg.clk_div = 8 * (1 << freq) - 1;
-                       break;
+                               break;
+                       }
+               } else if (GET_SPEED(arg) == CODEC_f32768) {
+                       gen_cfg.base_freq = regk_sser_f32_768;
+                       switch (GET_FREQ(arg)) {
+                       case FREQ_4096kHz:
+                               gen_cfg.clk_div = 7;
+                               break;
+                       default:
+                               spin_unlock_irq(&port->lock);
+                               return -EINVAL;
                        }
                } else {
                        gen_cfg.base_freq = regk_sser_f29_493;
@@ -767,62 +858,64 @@ static int sync_serial_ioctl(struct file *file,
 
                break;
        case SSP_MODE:
-               switch(arg)
-               {
-                       case MASTER_OUTPUT:
-                               port->output = 1;
-                               port->input = 0;
-                               frm_cfg.out_on = regk_sser_tr;
-                               frm_cfg.frame_pin_dir = regk_sser_out;
-                               gen_cfg.clk_dir = regk_sser_out;
-                               break;
-                       case SLAVE_OUTPUT:
-                               port->output = 1;
-                               port->input = 0;
-                               frm_cfg.frame_pin_dir = regk_sser_in;
-                               gen_cfg.clk_dir = regk_sser_in;
-                               break;
-                       case MASTER_INPUT:
-                               port->output = 0;
-                               port->input = 1;
-                               frm_cfg.frame_pin_dir = regk_sser_out;
-                               frm_cfg.out_on = regk_sser_intern_tb;
-                               gen_cfg.clk_dir = regk_sser_out;
-                               break;
-                       case SLAVE_INPUT:
-                               port->output = 0;
-                               port->input = 1;
-                               frm_cfg.frame_pin_dir = regk_sser_in;
-                               gen_cfg.clk_dir = regk_sser_in;
-                               break;
-                       case MASTER_BIDIR:
-                               port->output = 1;
-                               port->input = 1;
-                               frm_cfg.frame_pin_dir = regk_sser_out;
-                               frm_cfg.out_on = regk_sser_intern_tb;
-                               gen_cfg.clk_dir = regk_sser_out;
-                               break;
-                       case SLAVE_BIDIR:
-                               port->output = 1;
-                               port->input = 1;
-                               frm_cfg.frame_pin_dir = regk_sser_in;
-                               gen_cfg.clk_dir = regk_sser_in;
-                               break;
-                       default:
-                               spin_unlock_irq(&port->lock);
-                               return -EINVAL;
+               switch (arg) {
+               case MASTER_OUTPUT:
+                       port->output = 1;
+                       port->input = 0;
+                       frm_cfg.out_on = regk_sser_tr;
+                       frm_cfg.frame_pin_dir = regk_sser_out;
+                       gen_cfg.clk_dir = regk_sser_out;
+                       break;
+               case SLAVE_OUTPUT:
+                       port->output = 1;
+                       port->input = 0;
+                       frm_cfg.frame_pin_dir = regk_sser_in;
+                       gen_cfg.clk_dir = regk_sser_in;
+                       break;
+               case MASTER_INPUT:
+                       port->output = 0;
+                       port->input = 1;
+                       frm_cfg.frame_pin_dir = regk_sser_out;
+                       frm_cfg.out_on = regk_sser_intern_tb;
+                       gen_cfg.clk_dir = regk_sser_out;
+                       break;
+               case SLAVE_INPUT:
+                       port->output = 0;
+                       port->input = 1;
+                       frm_cfg.frame_pin_dir = regk_sser_in;
+                       gen_cfg.clk_dir = regk_sser_in;
+                       break;
+               case MASTER_BIDIR:
+                       port->output = 1;
+                       port->input = 1;
+                       frm_cfg.frame_pin_dir = regk_sser_out;
+                       frm_cfg.out_on = regk_sser_intern_tb;
+                       gen_cfg.clk_dir = regk_sser_out;
+                       break;
+               case SLAVE_BIDIR:
+                       port->output = 1;
+                       port->input = 1;
+                       frm_cfg.frame_pin_dir = regk_sser_in;
+                       gen_cfg.clk_dir = regk_sser_in;
+                       break;
+               default:
+                       spin_unlock_irq(&port->lock);
+                       return -EINVAL;
                }
-               if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
+               if (!port->use_dma || arg == MASTER_OUTPUT ||
+                               arg == SLAVE_OUTPUT)
                        intr_mask.rdav = regk_sser_yes;
                break;
        case SSP_FRAME_SYNC:
                if (arg & NORMAL_SYNC) {
                        frm_cfg.rec_delay = 1;
                        frm_cfg.tr_delay = 1;
-               }
-               else if (arg & EARLY_SYNC)
+               } else if (arg & EARLY_SYNC)
                        frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
-               else if (arg & SECOND_WORD_SYNC) {
+               else if (arg & LATE_SYNC) {
+                       frm_cfg.tr_delay = 2;
+                       frm_cfg.rec_delay = 2;
+               } else if (arg & SECOND_WORD_SYNC) {
                        frm_cfg.rec_delay = 7;
                        frm_cfg.tr_delay = 1;
                }
@@ -914,15 +1007,12 @@ static int sync_serial_ioctl(struct file *file,
                frm_cfg.type = regk_sser_level;
                frm_cfg.tr_delay = 1;
                frm_cfg.level = regk_sser_neg_lo;
-               if (arg & SPI_SLAVE)
-               {
+               if (arg & SPI_SLAVE) {
                        rec_cfg.clk_pol = regk_sser_neg;
                        gen_cfg.clk_dir = regk_sser_in;
                        port->input = 1;
                        port->output = 0;
-               }
-               else
-               {
+               } else {
                        gen_cfg.out_clk_pol = regk_sser_pos;
                        port->input = 0;
                        port->output = 1;
@@ -965,19 +1055,19 @@ static int sync_serial_ioctl(struct file *file,
 }
 
 static long sync_serial_ioctl(struct file *file,
-                             unsigned int cmd, unsigned long arg)
+               unsigned int cmd, unsigned long arg)
 {
-       long ret;
+       long ret;
 
-       mutex_lock(&sync_serial_mutex);
-       ret = sync_serial_ioctl_unlocked(file, cmd, arg);
-       mutex_unlock(&sync_serial_mutex);
+       mutex_lock(&sync_serial_mutex);
+       ret = sync_serial_ioctl_unlocked(file, cmd, arg);
+       mutex_unlock(&sync_serial_mutex);
 
-       return ret;
+       return ret;
 }
 
 /* NOTE: sync_serial_write does not support concurrency */
-static ssize_t sync_serial_write(struct file *file, const char *buf,
+static ssize_t sync_serial_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *ppos)
 {
        int dev = iminor(file_inode(file));
@@ -993,7 +1083,7 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
        unsigned char *buf_stop_ptr; /* Last byte + 1 */
 
        if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
-               DEBUG(printk("Invalid minor %d\n", dev));
+               DEBUG(pr_info("Invalid minor %d\n", dev));
                return -ENODEV;
        }
        port = &ports[dev];
@@ -1006,9 +1096,9 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
         * |_________|___________________|________________________|
         *           ^ rd_ptr            ^ wr_ptr
         */
-       DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
-                         port->port_nbr, count, port->active_tr_descr,
-                         port->catch_tr_descr));
+       DEBUGWRITE(pr_info("W d%d c %zu a: %p c: %p\n",
+                          port->port_nbr, count, port->active_tr_descr,
+                          port->catch_tr_descr));
 
        /* Read variables that may be updated by interrupts */
        spin_lock_irqsave(&port->lock, flags);
@@ -1020,7 +1110,7 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
        if (port->tr_running &&
            ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
             out_buf_count >= OUT_BUFFER_SIZE)) {
-               DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
+               DEBUGWRITE(pr_info("sser%d full\n", dev));
                return -EAGAIN;
        }
 
@@ -1043,15 +1133,16 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
        if (copy_from_user(wr_ptr, buf, trunc_count))
                return -EFAULT;
 
-       DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d     %p %p %p\n",
-                          out_buf_count, trunc_count,
-                          port->out_buf_count, port->out_buffer,
-                          wr_ptr, buf_stop_ptr));
+       DEBUGOUTBUF(pr_info("%-4d + %-4d = %-4d     %p %p %p\n",
+                           out_buf_count, trunc_count,
+                           port->out_buf_count, port->out_buffer,
+                           wr_ptr, buf_stop_ptr));
 
        /* Make sure transmitter/receiver is running */
        if (!port->started) {
                reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
-               reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
+               reg_sser_rw_rec_cfg rec_cfg =
+                       REG_RD(sser, port->regi_sser, rw_rec_cfg);
                cfg.en = regk_sser_yes;
                rec_cfg.rec_en = port->input;
                REG_WR(sser, port->regi_sser, rw_cfg, cfg);
@@ -1068,8 +1159,11 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
        spin_lock_irqsave(&port->lock, flags);
        port->out_buf_count += trunc_count;
        if (port->use_dma) {
+#ifdef SYNC_SER_DMA
                start_dma_out(port, wr_ptr, trunc_count);
+#endif
        } else if (!port->tr_running) {
+#ifdef SYNC_SER_MANUAL
                reg_sser_rw_intr_mask intr_mask;
                intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
                /* Start sender by writing data */
@@ -1077,14 +1171,15 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
                /* and enable transmitter ready IRQ */
                intr_mask.trdy = 1;
                REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
+#endif
        }
        spin_unlock_irqrestore(&port->lock, flags);
 
        /* Exit if non blocking */
        if (file->f_flags & O_NONBLOCK) {
-               DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu  %08x\n",
-                                 port->port_nbr, trunc_count,
-                                 REG_RD_INT(dma, port->regi_dmaout, r_intr)));
+               DEBUGWRITE(pr_info("w d%d c %u  %08x\n",
+                                  port->port_nbr, trunc_count,
+                                  REG_RD_INT(dma, port->regi_dmaout, r_intr)));
                return trunc_count;
        }
 
@@ -1094,105 +1189,32 @@ static ssize_t sync_serial_write(struct file *file, const char *buf,
        if (signal_pending(current))
                return -EINTR;
 
-       DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
-                         port->port_nbr, trunc_count));
+       DEBUGWRITE(pr_info("w d%d c %u\n", port->port_nbr, trunc_count));
        return trunc_count;
 }
 
-static ssize_t sync_serial_read(struct file * file, char * buf,
+static ssize_t sync_serial_read(struct file *file, char __user *buf,
                                size_t count, loff_t *ppos)
 {
-       int dev = iminor(file_inode(file));
-       int avail;
-       sync_port *port;
-       unsigned char* start;
-       unsigned char* end;
-       unsigned long flags;
-
-       if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
-       {
-               DEBUG(printk("Invalid minor %d\n", dev));
-               return -ENODEV;
-       }
-       port = &ports[dev];
-
-       DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
-
-       if (!port->started)
-       {
-               reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
-               reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
-               reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
-               cfg.en = regk_sser_yes;
-               tr_cfg.tr_en = regk_sser_yes;
-               rec_cfg.rec_en = regk_sser_yes;
-               REG_WR(sser, port->regi_sser, rw_cfg, cfg);
-               REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
-               REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
-               port->started = 1;
-       }
-
-       /* Calculate number of available bytes */
-       /* Save pointers to avoid that they are modified by interrupt */
-       spin_lock_irqsave(&port->lock, flags);
-       start = (unsigned char*)port->readp; /* cast away volatile */
-       end = (unsigned char*)port->writep;  /* cast away volatile */
-       spin_unlock_irqrestore(&port->lock, flags);
-       while ((start == end) && !port->full) /* No data */
-       {
-               DEBUGREAD(printk(KERN_DEBUG "&"));
-               if (file->f_flags & O_NONBLOCK)
-                       return -EAGAIN;
-
-               wait_event_interruptible(port->in_wait_q,
-                                        !(start == end && !port->full));
-               if (signal_pending(current))
-                       return -EINTR;
-
-               spin_lock_irqsave(&port->lock, flags);
-               start = (unsigned char*)port->readp; /* cast away volatile */
-               end = (unsigned char*)port->writep;  /* cast away volatile */
-               spin_unlock_irqrestore(&port->lock, flags);
-       }
-
-       /* Lazy read, never return wrapped data. */
-       if (port->full)
-               avail = port->in_buffer_size;
-       else if (end > start)
-               avail = end - start;
-       else
-               avail = port->flip + port->in_buffer_size - start;
-
-       count = count > avail ? avail : count;
-       if (copy_to_user(buf, start, count))
-               return -EFAULT;
-       /* Disable interrupts while updating readp */
-       spin_lock_irqsave(&port->lock, flags);
-       port->readp += count;
-       if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
-               port->readp = port->flip;
-       port->full = 0;
-       spin_unlock_irqrestore(&port->lock, flags);
-       DEBUGREAD(printk("r %d\n", count));
-       return count;
+       return __sync_serial_read(file, buf, count, ppos, NULL);
 }
 
-static void send_word(sync_port* port)
+#ifdef SYNC_SER_MANUAL
+static void send_word(struct sync_port *port)
 {
        reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
        reg_sser_rw_tr_data tr_data =  {0};
 
-       switch(tr_cfg.sample_size)
+       switch (tr_cfg.sample_size) {
+       case 8:
+               port->out_buf_count--;
+               tr_data.data = *port->out_rd_ptr++;
+               REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
+               if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
+                       port->out_rd_ptr = port->out_buffer;
+               break;
+       case 12:
        {
-        case 8:
-                port->out_buf_count--;
-                tr_data.data = *port->out_rd_ptr++;
-                REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
-                if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
-                        port->out_rd_ptr = port->out_buffer;
-                break;
-        case 12:
-        {
                int data = (*port->out_rd_ptr++) << 8;
                data |= *port->out_rd_ptr++;
                port->out_buf_count -= 2;
@@ -1200,8 +1222,8 @@ static void send_word(sync_port* port)
                REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
                if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
                        port->out_rd_ptr = port->out_buffer;
+               break;
        }
-       break;
        case 16:
                port->out_buf_count -= 2;
                tr_data.data = *(unsigned short *)port->out_rd_ptr;
@@ -1233,27 +1255,28 @@ static void send_word(sync_port* port)
                break;
        }
 }
+#endif
 
-static void start_dma_out(struct sync_port *port,
-                         const char *data, int count)
+#ifdef SYNC_SER_DMA
+static void start_dma_out(struct sync_port *port, const char *data, int count)
 {
-       port->active_tr_descr->buf = (char *) virt_to_phys((char *) data);
+       port->active_tr_descr->buf = (char *)virt_to_phys((char *)data);
        port->active_tr_descr->after = port->active_tr_descr->buf + count;
        port->active_tr_descr->intr = 1;
 
        port->active_tr_descr->eol = 1;
        port->prev_tr_descr->eol = 0;
 
-       DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
+       DEBUGTRDMA(pr_info("Inserting eolr:%p eol@:%p\n",
                port->prev_tr_descr, port->active_tr_descr));
        port->prev_tr_descr = port->active_tr_descr;
-       port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next);
+       port->active_tr_descr = phys_to_virt((int)port->active_tr_descr->next);
 
        if (!port->tr_running) {
                reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
                        rw_tr_cfg);
 
-               port->out_context.next = 0;
+               port->out_context.next = NULL;
                port->out_context.saved_data =
                        (dma_descr_data *)virt_to_phys(port->prev_tr_descr);
                port->out_context.saved_data_buf = port->prev_tr_descr->buf;
@@ -1263,57 +1286,58 @@ static void start_dma_out(struct sync_port *port,
 
                tr_cfg.tr_en = regk_sser_yes;
                REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
-               DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
+               DEBUGTRDMA(pr_info("dma s\n"););
        } else {
                DMA_CONTINUE_DATA(port->regi_dmaout);
-               DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
+               DEBUGTRDMA(pr_info("dma c\n"););
        }
 
        port->tr_running = 1;
 }
 
-static void start_dma_in(sync_port *port)
+static void start_dma_in(struct sync_port *port)
 {
        int i;
        char *buf;
+       unsigned long flags;
+       spin_lock_irqsave(&port->lock, flags);
        port->writep = port->flip;
+       spin_unlock_irqrestore(&port->lock, flags);
 
-       if (port->writep > port->flip + port->in_buffer_size) {
-               panic("Offset too large in sync serial driver\n");
-               return;
-       }
-       buf = (char*)virt_to_phys(port->in_buffer);
+       buf = (char *)virt_to_phys(port->in_buffer);
        for (i = 0; i < NBR_IN_DESCR; i++) {
                port->in_descr[i].buf = buf;
                port->in_descr[i].after = buf + port->inbufchunk;
                port->in_descr[i].intr = 1;
-               port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]);
+               port->in_descr[i].next =
+                       (dma_descr_data *)virt_to_phys(&port->in_descr[i+1]);
                port->in_descr[i].buf = buf;
                buf += port->inbufchunk;
        }
        /* Link the last descriptor to the first */
-       port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
+       port->in_descr[i-1].next =
+               (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
        port->in_descr[i-1].eol = regk_sser_yes;
        port->next_rx_desc = &port->in_descr[0];
        port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
-       port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
+       port->in_context.saved_data =
+               (dma_descr_data *)virt_to_phys(&port->in_descr[0]);
        port->in_context.saved_data_buf = port->in_descr[0].buf;
        DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
 }
 
-#ifdef SYNC_SER_DMA
 static irqreturn_t tr_interrupt(int irq, void *dev_id)
 {
        reg_dma_r_masked_intr masked;
-       reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
+       reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
        reg_dma_rw_stat stat;
        int i;
        int found = 0;
        int stop_sser = 0;
 
        for (i = 0; i < NBR_PORTS; i++) {
-               sync_port *port = &ports[i];
-               if (!port->enabled  || !port->use_dma)
+               struct sync_port *port = &ports[i];
+               if (!port->enabled || !port->use_dma)
                        continue;
 
                /* IRQ active for the port? */
@@ -1338,19 +1362,20 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
                        int sent;
                        sent = port->catch_tr_descr->after -
                                port->catch_tr_descr->buf;
-                       DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
-                                         "in descr %p (ac: %p)\n",
-                                         port->out_buf_count, sent,
-                                         port->out_buf_count - sent,
-                                         port->catch_tr_descr,
-                                         port->active_tr_descr););
+                       DEBUGTXINT(pr_info("%-4d - %-4d = %-4d\t"
+                                          "in descr %p (ac: %p)\n",
+                                          port->out_buf_count, sent,
+                                          port->out_buf_count - sent,
+                                          port->catch_tr_descr,
+                                          port->active_tr_descr););
                        port->out_buf_count -= sent;
                        port->catch_tr_descr =
                                phys_to_virt((int) port->catch_tr_descr->next);
                        port->out_rd_ptr =
                                phys_to_virt((int) port->catch_tr_descr->buf);
                } else {
-                       int i, sent;
+                       reg_sser_rw_tr_cfg tr_cfg;
+                       int j, sent;
                        /* EOL handler.
                         * Note that if an EOL was encountered during the irq
                         * locked section of sync_ser_write the DMA will be
@@ -1358,11 +1383,11 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
                         * The remaining descriptors will be traversed by
                         * the descriptor interrupts as usual.
                         */
-                       i = 0;
+                       j = 0;
                        while (!port->catch_tr_descr->eol) {
                                sent = port->catch_tr_descr->after -
                                        port->catch_tr_descr->buf;
-                               DEBUGOUTBUF(printk(KERN_DEBUG
+                               DEBUGOUTBUF(pr_info(
                                        "traversing descr %p -%d (%d)\n",
                                        port->catch_tr_descr,
                                        sent,
@@ -1370,16 +1395,15 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
                                port->out_buf_count -= sent;
                                port->catch_tr_descr = phys_to_virt(
                                        (int)port->catch_tr_descr->next);
-                               i++;
-                               if (i >= NBR_OUT_DESCR) {
+                               j++;
+                               if (j >= NBR_OUT_DESCR) {
                                        /* TODO: Reset and recover */
                                        panic("sync_serial: missing eol");
                                }
                        }
                        sent = port->catch_tr_descr->after -
                                port->catch_tr_descr->buf;
-                       DEBUGOUTBUF(printk(KERN_DEBUG
-                               "eol at descr %p -%d (%d)\n",
+                       DEBUGOUTBUF(pr_info("eol at descr %p -%d (%d)\n",
                                port->catch_tr_descr,
                                sent,
                                port->out_buf_count));
@@ -1394,15 +1418,13 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
                                        OUT_BUFFER_SIZE)
                                port->out_rd_ptr = port->out_buffer;
 
-                       reg_sser_rw_tr_cfg tr_cfg =
-                               REG_RD(sser, port->regi_sser, rw_tr_cfg);
-                       DEBUGTXINT(printk(KERN_DEBUG
+                       tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
+                       DEBUGTXINT(pr_info(
                                "tr_int DMA stop %d, set catch @ %p\n",
                                port->out_buf_count,
                                port->active_tr_descr));
                        if (port->out_buf_count != 0)
-                               printk(KERN_CRIT "sync_ser: buffer not "
-                                       "empty after eol.\n");
+                               pr_err("sync_ser: buf not empty after eol\n");
                        port->catch_tr_descr = port->active_tr_descr;
                        port->tr_running = 0;
                        tr_cfg.tr_en = regk_sser_no;
@@ -1414,62 +1436,79 @@ static irqreturn_t tr_interrupt(int irq, void *dev_id)
        return IRQ_RETVAL(found);
 } /* tr_interrupt */
 
+
+/* Copy one completed DMA receive chunk into the port's input ring buffer
+ * (dropping the new chunk on overrun), timestamp it, advance the descriptor
+ * ring and re-arm/ack the DMA.  Called from rx_interrupt() once per
+ * completed descriptor.
+ */
+static inline void handle_rx_packet(struct sync_port *port)
+{
+       int idx;
+       reg_dma_rw_ack_intr ack_intr = { .data = regk_dma_yes };
+       unsigned long flags;
+
+       /* pr_info() already supplies the KERN_INFO level; passing KERN_INFO
+        * again would print the level prefix as literal characters. */
+       DEBUGRXINT(pr_info("!"));
+       spin_lock_irqsave(&port->lock, flags);
+
+       /* If we overrun the user experience is crap regardless if we
+        * drop new or old data. Its much easier to get it right when
+        * dropping new data so lets do that.
+        */
+       if ((port->writep + port->inbufchunk <=
+            port->flip + port->in_buffer_size) &&
+           (port->in_buffer_len + port->inbufchunk < IN_BUFFER_SIZE)) {
+               memcpy(port->writep,
+                      phys_to_virt((unsigned)port->next_rx_desc->buf),
+                      port->inbufchunk);
+               port->writep += port->inbufchunk;
+               if (port->writep >= port->flip + port->in_buffer_size)
+                       port->writep = port->flip;
+
+               /* Timestamp the new data chunk. */
+               if (port->write_ts_idx == NBR_IN_DESCR)
+                       port->write_ts_idx = 0;
+               idx = port->write_ts_idx++;
+               do_posix_clock_monotonic_gettime(&port->timestamp[idx]);
+               port->in_buffer_len += port->inbufchunk;
+       }
+       spin_unlock_irqrestore(&port->lock, flags);
+
+       port->next_rx_desc->eol = 1;
+       port->prev_rx_desc->eol = 0;
+       /* Cache bug workaround */
+       flush_dma_descr(port->prev_rx_desc, 0);
+       port->prev_rx_desc = port->next_rx_desc;
+       port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
+       /* Cache bug workaround */
+       flush_dma_descr(port->prev_rx_desc, 1);
+       /* wake up the waiting process */
+       wake_up_interruptible(&port->in_wait_q);
+       DMA_CONTINUE(port->regi_dmain);
+       REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
+
+}
+
 static irqreturn_t rx_interrupt(int irq, void *dev_id)
 {
        reg_dma_r_masked_intr masked;
-       reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
 
        int i;
        int found = 0;
 
-       for (i = 0; i < NBR_PORTS; i++)
-       {
-               sync_port *port = &ports[i];
+       DEBUG(pr_info("rx_interrupt\n"));
+
+       for (i = 0; i < NBR_PORTS; i++) {
+               struct sync_port *port = &ports[i];
 
-               if (!port->enabled || !port->use_dma )
+               if (!port->enabled || !port->use_dma)
                        continue;
 
                masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
 
-               if (masked.data) /* Descriptor interrupt */
-               {
-                       found = 1;
-                       while (REG_RD(dma, port->regi_dmain, rw_data) !=
-                              virt_to_phys(port->next_rx_desc)) {
-                               DEBUGRXINT(printk(KERN_DEBUG "!"));
-                               if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
-                                       int first_size = port->flip + port->in_buffer_size - port->writep;
-                                       memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
-                                       memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
-                                       port->writep = port->flip + port->inbufchunk - first_size;
-                               } else {
-                                       memcpy((char*)port->writep,
-                                              phys_to_virt((unsigned)port->next_rx_desc->buf),
-                                              port->inbufchunk);
-                                       port->writep += port->inbufchunk;
-                                       if (port->writep >= port->flip + port->in_buffer_size)
-                                               port->writep = port->flip;
-                               }
-                                if (port->writep == port->readp)
-                                {
-                                 port->full = 1;
-                                }
-
-                               port->next_rx_desc->eol = 1;
-                               port->prev_rx_desc->eol = 0;
-                               /* Cache bug workaround */
-                               flush_dma_descr(port->prev_rx_desc, 0);
-                               port->prev_rx_desc = port->next_rx_desc;
-                               port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
-                               /* Cache bug workaround */
-                               flush_dma_descr(port->prev_rx_desc, 1);
-                               /* wake up the waiting process */
-                               wake_up_interruptible(&port->in_wait_q);
-                               DMA_CONTINUE(port->regi_dmain);
-                               REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
+               if (!masked.data)
+                       continue;
 
-                       }
-               }
+               /* Descriptor interrupt */
+               found = 1;
+               while (REG_RD(dma, port->regi_dmain, rw_data) !=
+                               virt_to_phys(port->next_rx_desc))
+                       handle_rx_packet(port);
        }
        return IRQ_RETVAL(found);
 } /* rx_interrupt */
@@ -1478,75 +1517,83 @@ static irqreturn_t rx_interrupt(int irq, void *dev_id)
 #ifdef SYNC_SER_MANUAL
 static irqreturn_t manual_interrupt(int irq, void *dev_id)
 {
+       unsigned long flags;
        int i;
        int found = 0;
        reg_sser_r_masked_intr masked;
 
-       for (i = 0; i < NBR_PORTS; i++)
-       {
-               sync_port *port = &ports[i];
+       for (i = 0; i < NBR_PORTS; i++) {
+               struct sync_port *port = &ports[i];
 
                if (!port->enabled || port->use_dma)
-               {
                        continue;
-               }
 
                masked = REG_RD(sser, port->regi_sser, r_masked_intr);
-               if (masked.rdav)        /* Data received? */
-               {
-                       reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
-                       reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
+               /* Data received? */
+               if (masked.rdav) {
+                       reg_sser_rw_rec_cfg rec_cfg =
+                               REG_RD(sser, port->regi_sser, rw_rec_cfg);
+                       reg_sser_r_rec_data data = REG_RD(sser,
+                               port->regi_sser, r_rec_data);
                        found = 1;
                        /* Read data */
-                       switch(rec_cfg.sample_size)
-                       {
+                       spin_lock_irqsave(&port->lock, flags);
+                       switch (rec_cfg.sample_size) {
                        case 8:
                                *port->writep++ = data.data & 0xff;
                                break;
                        case 12:
                                *port->writep = (data.data & 0x0ff0) >> 4;
                                *(port->writep + 1) = data.data & 0x0f;
-                               port->writep+=2;
+                               port->writep += 2;
                                break;
                        case 16:
-                               *(unsigned short*)port->writep = data.data;
-                               port->writep+=2;
+                               *(unsigned short *)port->writep = data.data;
+                               port->writep += 2;
                                break;
                        case 24:
-                               *(unsigned int*)port->writep = data.data;
-                               port->writep+=3;
+                               *(unsigned int *)port->writep = data.data;
+                               port->writep += 3;
                                break;
                        case 32:
-                               *(unsigned int*)port->writep = data.data;
-                               port->writep+=4;
+                               *(unsigned int *)port->writep = data.data;
+                               port->writep += 4;
                                break;
                        }
 
-                       if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
+                       /* Wrap? */
+                       if (port->writep >= port->flip + port->in_buffer_size)
                                port->writep = port->flip;
                        if (port->writep == port->readp) {
-                               /* receive buffer overrun, discard oldest data
-                                */
+                               /* Receive buf overrun, discard oldest data */
                                port->readp++;
-                               if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
+                               /* Wrap? */
+                               if (port->readp >= port->flip +
+                                               port->in_buffer_size)
                                        port->readp = port->flip;
                        }
+                       spin_unlock_irqrestore(&port->lock, flags);
                        if (sync_data_avail(port) >= port->inbufchunk)
-                               wake_up_interruptible(&port->in_wait_q); /* Wake up application */
+                               /* Wake up application */
+                               wake_up_interruptible(&port->in_wait_q);
                }
 
-               if (masked.trdy) /* Transmitter ready? */
-               {
+               /* Transmitter ready? */
+               if (masked.trdy) {
                        found = 1;
-                       if (port->out_buf_count > 0) /* More data to send */
+                       /* More data to send */
+                       if (port->out_buf_count > 0)
                                send_word(port);
-                       else /* transmission finished */
-                       {
+                       else {
+                               /* Transmission finished */
                                reg_sser_rw_intr_mask intr_mask;
-                               intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
+                               intr_mask = REG_RD(sser, port->regi_sser,
+                                       rw_intr_mask);
                                intr_mask.trdy = 0;
-                               REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
-                               wake_up_interruptible(&port->out_wait_q); /* Wake up application */
+                               REG_WR(sser, port->regi_sser,
+                                       rw_intr_mask, intr_mask);
+                               /* Wake up application */
+                               wake_up_interruptible(&port->out_wait_q);
                        }
                }
        }
@@ -1554,4 +1601,109 @@ static irqreturn_t manual_interrupt(int irq, void *dev_id)
 }
 #endif
 
+/* Register the synchronous serial char-device region, cdev, sysfs class and
+ * per-port device nodes, and initialize each configured port.
+ * Returns 0 on success, -1/-EIO on failure with everything already
+ * allocated rolled back.
+ */
+static int __init etrax_sync_serial_init(void)
+{
+#if 1
+       /* This code will be removed when we move to udev for all devices. */
+       syncser_first = MKDEV(SYNC_SERIAL_MAJOR, 0);
+       if (register_chrdev_region(syncser_first, minor_count, SYNCSER_NAME)) {
+               pr_err("Failed to register major %d\n", SYNC_SERIAL_MAJOR);
+               return -1;
+       }
+#else
+       /* Allocate dynamic major number. */
+       if (alloc_chrdev_region(&syncser_first, 0, minor_count, SYNCSER_NAME)) {
+               pr_err("Failed to allocate character device region\n");
+               return -1;
+       }
+#endif
+       syncser_cdev = cdev_alloc();
+       if (!syncser_cdev) {
+               pr_err("Failed to allocate cdev for syncser\n");
+               unregister_chrdev_region(syncser_first, minor_count);
+               return -1;
+       }
+       cdev_init(syncser_cdev, &syncser_fops);
+
+       /* Create a sysfs class for syncser */
+       syncser_class = class_create(THIS_MODULE, "syncser_class");
+
+       /* Initialize Ports */
+#if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
+       if (artpec_pinmux_alloc_fixed(PINMUX_SSER0)) {
+               pr_warn("Unable to alloc pins for synchronous serial port 0\n");
+               /* Roll back the class and cdev too, not just the region. */
+               class_destroy(syncser_class);
+               cdev_del(syncser_cdev);
+               unregister_chrdev_region(syncser_first, minor_count);
+               return -EIO;
+       }
+       initialize_port(0);
+       ports[0].enabled = 1;
+       /* Register with sysfs so udev can pick it up. */
+       device_create(syncser_class, NULL, syncser_first, NULL,
+                     "%s%d", SYNCSER_NAME, 0);
+#endif
+
+#if defined(CONFIG_ETRAXFS) && defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
+       if (artpec_pinmux_alloc_fixed(PINMUX_SSER1)) {
+               pr_warn("Unable to alloc pins for synchronous serial port 1\n");
+               class_destroy(syncser_class);
+               cdev_del(syncser_cdev);
+               unregister_chrdev_region(syncser_first, minor_count);
+               return -EIO;
+       }
+       initialize_port(1);
+       ports[1].enabled = 1;
+       /* Register with sysfs so udev can pick it up.
+        * Port 1 gets minor 1 and the "...1" node name (was a copy-paste
+        * of the port-0 registration). */
+       device_create(syncser_class, NULL, syncser_first + 1, NULL,
+                     "%s%d", SYNCSER_NAME, 1);
+#endif
+
+       /* Add it to system */
+       if (cdev_add(syncser_cdev, syncser_first, minor_count) < 0) {
+               pr_err("Failed to add syncser as char device\n");
+               device_destroy(syncser_class, syncser_first);
+               class_destroy(syncser_class);
+               cdev_del(syncser_cdev);
+               unregister_chrdev_region(syncser_first, minor_count);
+               return -1;
+       }
+
+
+       pr_info("ARTPEC synchronous serial port (%s: %d, %d)\n",
+               SYNCSER_NAME, MAJOR(syncser_first), MINOR(syncser_first));
+
+       return 0;
+}
+
+/* Tear down everything etrax_sync_serial_init() set up: the sysfs device
+ * node and class, then the cdev and char-device region, then each port's
+ * DMA channels and IRQs.
+ */
+static void __exit etrax_sync_serial_exit(void)
+{
+       int i;
+       device_destroy(syncser_class, syncser_first);
+       class_destroy(syncser_class);
+
+       if (syncser_cdev) {
+               cdev_del(syncser_cdev);
+               unregister_chrdev_region(syncser_first, minor_count);
+       }
+       for (i = 0; i < NBR_PORTS; i++) {
+               struct sync_port *port = &ports[i];
+               /* init_irqs records which setup routine claimed this port's
+                * interrupts, so release with the matching teardown. */
+               if (port->init_irqs == dma_irq_setup) {
+                       /* Free dma irqs and dma channels. */
+#ifdef SYNC_SER_DMA
+                       artpec_free_dma(port->dma_in_nbr);
+                       artpec_free_dma(port->dma_out_nbr);
+                       free_irq(port->dma_out_intr_vect, port);
+                       free_irq(port->dma_in_intr_vect, port);
+#endif
+               } else if (port->init_irqs == manual_irq_setup) {
+                       /* Free manual irq. */
+                       free_irq(port->syncser_intr_vect, port);
+               }
+       }
+
+       pr_info("ARTPEC synchronous serial port unregistered\n");
+}
+
 module_init(etrax_sync_serial_init);
+module_exit(etrax_sync_serial_exit);
+
+MODULE_LICENSE("GPL");
+
index 610909b003f61eb9b5f1ba0c33946cb3886283df..02e33ebe51ec50b9a71d5cd1205df1168a5fe848 100644 (file)
@@ -3,7 +3,9 @@
  */
 
 #include <linux/console.h>
+#include <linux/kernel.h>
 #include <linux/init.h>
+#include <linux/string.h>
 #include <hwregs/reg_rdwr.h>
 #include <hwregs/reg_map.h>
 #include <hwregs/ser_defs.h>
@@ -65,6 +67,7 @@ struct dbg_port ports[] =
   },
 #endif
 };
+
 static struct dbg_port *port =
 #if defined(CONFIG_ETRAX_DEBUG_PORT0)
        &ports[0];
@@ -97,14 +100,19 @@ static struct dbg_port *kgdb_port =
 #endif
 #endif
 
-static void
-start_port(struct dbg_port* p)
+static void start_port(struct dbg_port *p)
 {
-       if (!p)
-               return;
+       /* Set up serial port registers */
+       reg_ser_rw_tr_ctrl tr_ctrl = {0};
+       reg_ser_rw_tr_dma_en tr_dma_en = {0};
 
-       if (p->started)
+       reg_ser_rw_rec_ctrl rec_ctrl = {0};
+       reg_ser_rw_tr_baud_div tr_baud_div = {0};
+       reg_ser_rw_rec_baud_div rec_baud_div = {0};
+
+       if (!p || p->started)
                return;
+
        p->started = 1;
 
        if (p->nbr == 1)
@@ -118,36 +126,24 @@ start_port(struct dbg_port* p)
                crisv32_pinmux_alloc_fixed(pinmux_ser4);
 #endif
 
-       /* Set up serial port registers */
-       reg_ser_rw_tr_ctrl tr_ctrl = {0};
-       reg_ser_rw_tr_dma_en tr_dma_en = {0};
-
-       reg_ser_rw_rec_ctrl rec_ctrl = {0};
-       reg_ser_rw_tr_baud_div tr_baud_div = {0};
-       reg_ser_rw_rec_baud_div rec_baud_div = {0};
-
        tr_ctrl.base_freq = rec_ctrl.base_freq = regk_ser_f29_493;
        tr_dma_en.en = rec_ctrl.dma_mode = regk_ser_no;
        tr_baud_div.div = rec_baud_div.div = 29493000 / p->baudrate / 8;
        tr_ctrl.en = rec_ctrl.en = 1;
 
-       if (p->parity == 'O')
-       {
+       if (p->parity == 'O') {
                tr_ctrl.par_en = regk_ser_yes;
                tr_ctrl.par = regk_ser_odd;
                rec_ctrl.par_en = regk_ser_yes;
                rec_ctrl.par = regk_ser_odd;
-       }
-       else if (p->parity == 'E')
-       {
+       } else if (p->parity == 'E') {
                tr_ctrl.par_en = regk_ser_yes;
                tr_ctrl.par = regk_ser_even;
                rec_ctrl.par_en = regk_ser_yes;
                rec_ctrl.par = regk_ser_odd;
        }
 
-       if (p->bits == 7)
-       {
+       if (p->bits == 7) {
                tr_ctrl.data_bits = regk_ser_bits7;
                rec_ctrl.data_bits = regk_ser_bits7;
        }
@@ -161,8 +157,7 @@ start_port(struct dbg_port* p)
 
 #ifdef CONFIG_ETRAX_KGDB
 /* Use polling to get a single character from the kernel debug port */
-int
-getDebugChar(void)
+int getDebugChar(void)
 {
        reg_ser_rs_stat_din stat;
        reg_ser_rw_ack_intr ack_intr = { 0 };
@@ -179,8 +174,7 @@ getDebugChar(void)
 }
 
 /* Use polling to put a single character to the kernel debug port */
-void
-putDebugChar(int val)
+void putDebugChar(int val)
 {
        reg_ser_r_stat_din stat;
        do {
@@ -190,12 +184,48 @@ putDebugChar(int val)
 }
 #endif /* CONFIG_ETRAX_KGDB */
 
+/* Poll the debug UART until the transmitter is ready, then send one
+ * character.  Pure busy-wait; usable before interrupts are up. */
+static void __init early_putch(int c)
+{
+       reg_ser_r_stat_din stat;
+       /* Wait until transmitter is ready and send. */
+       do
+               stat = REG_RD(ser, port->instance, r_stat_din);
+       while (!stat.tr_rdy);
+       REG_WR_INT(ser, port->instance, rw_dout, c);
+}
+
+/* console->write hook for the early boot console: emit the buffer one
+ * character at a time over the polled debug port, expanding '\n' to
+ * "\r\n", and pet the watchdog after each character so long boot-time
+ * prints do not trigger a watchdog bite. */
+static void __init
+early_console_write(struct console *con, const char *s, unsigned n)
+{
+       extern void reset_watchdog(void);
+       int i;
+
+       /* Send data. */
+       for (i = 0; i < n; i++) {
+               /* TODO: the '\n' -> '\n\r' translation should be done at the
+                  receiver. Remove it when the serial driver removes it.   */
+               if (s[i] == '\n')
+                       early_putch('\r');
+               early_putch(s[i]);
+               reset_watchdog();
+       }
+}
+
+static struct console early_console_dev __initdata = {
+       .name   = "early",
+       .write  = early_console_write,
+       .flags  = CON_PRINTBUFFER | CON_BOOT,
+       .index  = -1
+};
+
 /* Register console for printk's, etc. */
-int __init
-init_etrax_debug(void)
+int __init init_etrax_debug(void)
 {
         start_port(port);
 
+       /* Register an early console if a debug port was chosen.  */
+       register_console(&early_console_dev);
+
 #ifdef CONFIG_ETRAX_KGDB
        start_port(kgdb_port);
 #endif /* CONFIG_ETRAX_KGDB */
index ee66866538f891a7ff574efd4b82d836b4b3f894..eb74dabbeb96f31cc8279383655b5441b4acaf09 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/threads.h>
 #include <linux/cpufreq.h>
+#include <linux/mm.h>
 #include <asm/types.h>
 #include <asm/signal.h>
 #include <asm/io.h>
@@ -56,7 +57,6 @@ static int __init etrax_init_cont_rotime(void)
 }
 arch_initcall(etrax_init_cont_rotime);
 
-
 unsigned long timer_regs[NR_CPUS] =
 {
        regi_timer0,
@@ -68,9 +68,8 @@ unsigned long timer_regs[NR_CPUS] =
 extern int set_rtc_mmss(unsigned long nowtime);
 
 #ifdef CONFIG_CPU_FREQ
-static int
-cris_time_freq_notifier(struct notifier_block *nb, unsigned long val,
-                       void *data);
+static int cris_time_freq_notifier(struct notifier_block *nb,
+                                  unsigned long val, void *data);
 
 static struct notifier_block cris_time_freq_notifier_block = {
        .notifier_call = cris_time_freq_notifier,
@@ -87,7 +86,6 @@ unsigned long get_ns_in_jiffie(void)
        return ns;
 }
 
-
 /* From timer MDS describing the hardware watchdog:
  * 4.3.1 Watchdog Operation
  * The watchdog timer is an 8-bit timer with a configurable start value.
@@ -109,11 +107,18 @@ static short int watchdog_key = 42;  /* arbitrary 7 bit number */
  * is used though, so set this really low. */
 #define WATCHDOG_MIN_FREE_PAGES 8
 
+/* for reliable NICE_DOGGY behaviour */
+static int bite_in_progress;
+
 void reset_watchdog(void)
 {
 #if defined(CONFIG_ETRAX_WATCHDOG)
        reg_timer_rw_wd_ctrl wd_ctrl = { 0 };
 
+#if defined(CONFIG_ETRAX_WATCHDOG_NICE_DOGGY)
+       if (unlikely(bite_in_progress))
+               return;
+#endif
        /* Only keep watchdog happy as long as we have memory left! */
        if(nr_free_pages() > WATCHDOG_MIN_FREE_PAGES) {
                /* Reset the watchdog with the inverse of the old key */
@@ -148,7 +153,9 @@ void handle_watchdog_bite(struct pt_regs *regs)
 #if defined(CONFIG_ETRAX_WATCHDOG)
        extern int cause_of_death;
 
+       nmi_enter();
        oops_in_progress = 1;
+       bite_in_progress = 1;
        printk(KERN_WARNING "Watchdog bite\n");
 
        /* Check if forced restart or unexpected watchdog */
@@ -170,6 +177,7 @@ void handle_watchdog_bite(struct pt_regs *regs)
        printk(KERN_WARNING "Oops: bitten by watchdog\n");
        show_registers(regs);
        oops_in_progress = 0;
+       printk("\n"); /* Flush mtdoops.  */
 #ifndef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
        reset_watchdog();
 #endif
@@ -202,7 +210,7 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
        /* Reset watchdog otherwise it resets us! */
        reset_watchdog();
 
-        /* Update statistics. */
+       /* Update statistics. */
        update_process_times(user_mode(regs));
 
        cris_do_profile(regs); /* Save profiling information */
@@ -213,7 +221,7 @@ static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
 
        /* Call the real timer interrupt handler */
        xtime_update(1);
-        return IRQ_HANDLED;
+       return IRQ_HANDLED;
 }
 
 /* Timer is IRQF_SHARED so drivers can add stuff to the timer irq chain. */
@@ -293,14 +301,13 @@ void __init time_init(void)
 
 #ifdef CONFIG_CPU_FREQ
        cpufreq_register_notifier(&cris_time_freq_notifier_block,
-               CPUFREQ_TRANSITION_NOTIFIER);
+                                 CPUFREQ_TRANSITION_NOTIFIER);
 #endif
 }
 
 #ifdef CONFIG_CPU_FREQ
-static int
-cris_time_freq_notifier(struct notifier_block *nb, unsigned long val,
-                       void *data)
+static int cris_time_freq_notifier(struct notifier_block *nb,
+                                  unsigned long val, void *data)
 {
        struct cpufreq_freqs *freqs = data;
        if (val == CPUFREQ_POSTCHANGE) {
index 0b5b70d5f58a45ca2554daf64ccb4625c1ff725f..f0f335d8aa7928101fd0bf1d8f7d80c0e2f950f6 100644 (file)
@@ -26,8 +26,7 @@
 /* Copy to userspace.  This is based on the memcpy used for
    kernel-to-kernel copying; see "string.c".  */
 
-unsigned long
-__copy_user (void __user *pdst, const void *psrc, unsigned long pn)
+unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn)
 {
   /* We want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
@@ -155,13 +154,13 @@ __copy_user (void __user *pdst, const void *psrc, unsigned long pn)
 
   return retn;
 }
+EXPORT_SYMBOL(__copy_user);
 
 /* Copy from user to kernel, zeroing the bytes that were inaccessible in
    userland.  The return-value is the number of bytes that were
    inaccessible.  */
-
-unsigned long
-__copy_user_zeroing(void *pdst, const void __user *psrc, unsigned long pn)
+unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
+                                 unsigned long pn)
 {
   /* We want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
@@ -321,11 +320,10 @@ copy_exception_bytes:
 
   return retn + n;
 }
+EXPORT_SYMBOL(__copy_user_zeroing);
 
 /* Zero userspace.  */
-
-unsigned long
-__do_clear_user (void __user *pto, unsigned long pn)
+unsigned long __do_clear_user(void __user *pto, unsigned long pn)
 {
   /* We want the parameters put in special registers.
      Make sure the compiler is able to make something useful of this.
@@ -468,3 +466,4 @@ __do_clear_user (void __user *pto, unsigned long pn)
 
   return retn;
 }
+EXPORT_SYMBOL(__do_clear_user);
index 38f29eec14a617af2691c8ce35545f1fcc8a4422..05a04708b8eb45b43bfe6cb615e306b472c3de32 100644 (file)
@@ -26,7 +26,29 @@ static DEFINE_SPINLOCK(pinmux_lock);
 
 static void crisv32_pinmux_set(int port);
 
-int crisv32_pinmux_init(void)
+static int __crisv32_pinmux_alloc(int port, int first_pin, int last_pin,
+                                enum pin_mode mode)
+{
+       int i;
+
+       for (i = first_pin; i <= last_pin; i++) {
+               if ((pins[port][i] != pinmux_none)
+                   && (pins[port][i] != pinmux_gpio)
+                   && (pins[port][i] != mode)) {
+#ifdef DEBUG
+                       panic("Pinmux alloc failed!\n");
+#endif
+                       return -EPERM;
+               }
+       }
+
+       for (i = first_pin; i <= last_pin; i++)
+               pins[port][i] = mode;
+
+       crisv32_pinmux_set(port);
+}
+
+static int crisv32_pinmux_init(void)
 {
        static int initialized;
 
@@ -37,20 +59,20 @@ int crisv32_pinmux_init(void)
                pa.pa0 = pa.pa1 = pa.pa2 = pa.pa3 =
                    pa.pa4 = pa.pa5 = pa.pa6 = pa.pa7 = regk_pinmux_yes;
                REG_WR(pinmux, regi_pinmux, rw_pa, pa);
-               crisv32_pinmux_alloc(PORT_B, 0, PORT_PINS - 1, pinmux_gpio);
-               crisv32_pinmux_alloc(PORT_C, 0, PORT_PINS - 1, pinmux_gpio);
-               crisv32_pinmux_alloc(PORT_D, 0, PORT_PINS - 1, pinmux_gpio);
-               crisv32_pinmux_alloc(PORT_E, 0, PORT_PINS - 1, pinmux_gpio);
+               __crisv32_pinmux_alloc(PORT_B, 0, PORT_PINS - 1, pinmux_gpio);
+               __crisv32_pinmux_alloc(PORT_C, 0, PORT_PINS - 1, pinmux_gpio);
+               __crisv32_pinmux_alloc(PORT_D, 0, PORT_PINS - 1, pinmux_gpio);
+               __crisv32_pinmux_alloc(PORT_E, 0, PORT_PINS - 1, pinmux_gpio);
        }
 
        return 0;
 }
 
-int
-crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode)
+int crisv32_pinmux_alloc(int port, int first_pin, int last_pin,
+                        enum pin_mode mode)
 {
-       int i;
        unsigned long flags;
+       int ret;
 
        crisv32_pinmux_init();
 
@@ -59,26 +81,11 @@ crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode)
 
        spin_lock_irqsave(&pinmux_lock, flags);
 
-       for (i = first_pin; i <= last_pin; i++) {
-               if ((pins[port][i] != pinmux_none)
-                   && (pins[port][i] != pinmux_gpio)
-                   && (pins[port][i] != mode)) {
-                       spin_unlock_irqrestore(&pinmux_lock, flags);
-#ifdef DEBUG
-                       panic("Pinmux alloc failed!\n");
-#endif
-                       return -EPERM;
-               }
-       }
-
-       for (i = first_pin; i <= last_pin; i++)
-               pins[port][i] = mode;
-
-       crisv32_pinmux_set(port);
+       ret = __crisv32_pinmux_alloc(port, first_pin, last_pin, mode);
 
        spin_unlock_irqrestore(&pinmux_lock, flags);
 
-       return 0;
+       return ret;
 }
 
 int crisv32_pinmux_alloc_fixed(enum fixed_function function)
@@ -98,58 +105,58 @@ int crisv32_pinmux_alloc_fixed(enum fixed_function function)
 
        switch (function) {
        case pinmux_ser1:
-               ret = crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_fixed);
+               ret = __crisv32_pinmux_alloc(PORT_C, 4, 7, pinmux_fixed);
                hwprot.ser1 = regk_pinmux_yes;
                break;
        case pinmux_ser2:
-               ret = crisv32_pinmux_alloc(PORT_C, 8, 11, pinmux_fixed);
+               ret = __crisv32_pinmux_alloc(PORT_C, 8, 11, pinmux_fixed);
                hwprot.ser2 = regk_pinmux_yes;
                break;
        case pinmux_ser3:
-               ret = crisv32_pinmux_alloc(PORT_C, 12, 15, pinmux_fixed);
+               ret = __crisv32_pinmux_alloc(PORT_C, 12, 15, pinmux_fixed);
                hwprot.ser3 = regk_pinmux_yes;
                break;
        case pinmux_sser0:
-               ret = crisv32_pinmux_alloc(PORT_C, 0, 3, pinmux_fixed);
-               ret |= crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
+               ret = __crisv32_pinmux_alloc(PORT_C, 0, 3, pinmux_fixed);
+               ret |= __crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
                hwprot.sser0 = regk_pinmux_yes;
                break;
        case pinmux_sser1:
-               ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
+               ret = __crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
                hwprot.sser1 = regk_pinmux_yes;
                break;
        case pinmux_ata0:
-               ret = crisv32_pinmux_alloc(PORT_D, 5, 7, pinmux_fixed);
-               ret |= crisv32_pinmux_alloc(PORT_D, 15, 17, pinmux_fixed);
+               ret = __crisv32_pinmux_alloc(PORT_D, 5, 7, pinmux_fixed);
+               ret |= __crisv32_pinmux_alloc(PORT_D, 15, 17, pinmux_fixed);
                hwprot.ata0 = regk_pinmux_yes;
                break;
        case pinmux_ata1:
-               ret = crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
-               ret |= crisv32_pinmux_alloc(PORT_E, 17, 17, pinmux_fixed);
+               ret = __crisv32_pinmux_alloc(PORT_D, 0, 4, pinmux_fixed);
+               ret |= __crisv32_pinmux_alloc(PORT_E, 17, 17, pinmux_fixed);
                hwprot.ata1 = regk_pinmux_yes;
                break;
        case pinmux_ata2:
-               ret = crisv32_pinmux_alloc(PORT_C, 11, 15, pinmux_fixed);
-               ret |= crisv32_pinmux_alloc(PORT_E, 3, 3, pinmux_fixed);
+               ret = __crisv32_pinmux_alloc(PORT_C, 11, 15, pinmux_fixed);
+               ret |= __crisv32_pinmux_alloc(PORT_E, 3, 3, pinmux_fixed);
                hwprot.ata2 = regk_pinmux_yes;
                break;
        case pinmux_ata3:
-               ret = crisv32_pinmux_alloc(PORT_C, 8, 10, pinmux_fixed);
-               ret |= crisv32_pinmux_alloc(PORT_C, 0, 2, pinmux_fixed);
+               ret = __crisv32_pinmux_alloc(PORT_C, 8, 10, pinmux_fixed);
+               ret |= __crisv32_pinmux_alloc(PORT_C, 0, 2, pinmux_fixed);
                hwprot.ata2 = regk_pinmux_yes;
                break;
        case pinmux_ata:
-               ret = crisv32_pinmux_alloc(PORT_B, 0, 15, pinmux_fixed);
-               ret |= crisv32_pinmux_alloc(PORT_D, 8, 15, pinmux_fixed);
+               ret = __crisv32_pinmux_alloc(PORT_B, 0, 15, pinmux_fixed);
+               ret |= __crisv32_pinmux_alloc(PORT_D, 8, 15, pinmux_fixed);
                hwprot.ata = regk_pinmux_yes;
                break;
        case pinmux_eth1:
-               ret = crisv32_pinmux_alloc(PORT_E, 0, 17, pinmux_fixed);
+               ret = __crisv32_pinmux_alloc(PORT_E, 0, 17, pinmux_fixed);
                hwprot.eth1 = regk_pinmux_yes;
                hwprot.eth1_mgm = regk_pinmux_yes;
                break;
        case pinmux_timer:
-               ret = crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
+               ret = __crisv32_pinmux_alloc(PORT_C, 16, 16, pinmux_fixed);
                hwprot.timer = regk_pinmux_yes;
                spin_unlock_irqrestore(&pinmux_lock, flags);
                return ret;
@@ -188,9 +195,19 @@ void crisv32_pinmux_set(int port)
 #endif
 }
 
-int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
+static int __crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
 {
        int i;
+
+       for (i = first_pin; i <= last_pin; i++)
+               pins[port][i] = pinmux_none;
+
+       crisv32_pinmux_set(port);
+       return 0;
+}
+
+int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
+{
        unsigned long flags;
 
        crisv32_pinmux_init();
@@ -199,11 +216,7 @@ int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin)
                return -EINVAL;
 
        spin_lock_irqsave(&pinmux_lock, flags);
-
-       for (i = first_pin; i <= last_pin; i++)
-               pins[port][i] = pinmux_none;
-
-       crisv32_pinmux_set(port);
+       __crisv32_pinmux_dealloc(port, first_pin, last_pin);
        spin_unlock_irqrestore(&pinmux_lock, flags);
 
        return 0;
@@ -226,58 +239,58 @@ int crisv32_pinmux_dealloc_fixed(enum fixed_function function)
 
        switch (function) {
        case pinmux_ser1:
-               ret = crisv32_pinmux_dealloc(PORT_C, 4, 7);
+               ret = __crisv32_pinmux_dealloc(PORT_C, 4, 7);
                hwprot.ser1 = regk_pinmux_no;
                break;
        case pinmux_ser2:
-               ret = crisv32_pinmux_dealloc(PORT_C, 8, 11);
+               ret = __crisv32_pinmux_dealloc(PORT_C, 8, 11);
                hwprot.ser2 = regk_pinmux_no;
                break;
        case pinmux_ser3:
-               ret = crisv32_pinmux_dealloc(PORT_C, 12, 15);
+               ret = __crisv32_pinmux_dealloc(PORT_C, 12, 15);
                hwprot.ser3 = regk_pinmux_no;
                break;
        case pinmux_sser0:
-               ret = crisv32_pinmux_dealloc(PORT_C, 0, 3);
-               ret |= crisv32_pinmux_dealloc(PORT_C, 16, 16);
+               ret = __crisv32_pinmux_dealloc(PORT_C, 0, 3);
+               ret |= __crisv32_pinmux_dealloc(PORT_C, 16, 16);
                hwprot.sser0 = regk_pinmux_no;
                break;
        case pinmux_sser1:
-               ret = crisv32_pinmux_dealloc(PORT_D, 0, 4);
+               ret = __crisv32_pinmux_dealloc(PORT_D, 0, 4);
                hwprot.sser1 = regk_pinmux_no;
                break;
        case pinmux_ata0:
-               ret = crisv32_pinmux_dealloc(PORT_D, 5, 7);
-               ret |= crisv32_pinmux_dealloc(PORT_D, 15, 17);
+               ret = __crisv32_pinmux_dealloc(PORT_D, 5, 7);
+               ret |= __crisv32_pinmux_dealloc(PORT_D, 15, 17);
                hwprot.ata0 = regk_pinmux_no;
                break;
        case pinmux_ata1:
-               ret = crisv32_pinmux_dealloc(PORT_D, 0, 4);
-               ret |= crisv32_pinmux_dealloc(PORT_E, 17, 17);
+               ret = __crisv32_pinmux_dealloc(PORT_D, 0, 4);
+               ret |= __crisv32_pinmux_dealloc(PORT_E, 17, 17);
                hwprot.ata1 = regk_pinmux_no;
                break;
        case pinmux_ata2:
-               ret = crisv32_pinmux_dealloc(PORT_C, 11, 15);
-               ret |= crisv32_pinmux_dealloc(PORT_E, 3, 3);
+               ret = __crisv32_pinmux_dealloc(PORT_C, 11, 15);
+               ret |= __crisv32_pinmux_dealloc(PORT_E, 3, 3);
                hwprot.ata2 = regk_pinmux_no;
                break;
        case pinmux_ata3:
-               ret = crisv32_pinmux_dealloc(PORT_C, 8, 10);
-               ret |= crisv32_pinmux_dealloc(PORT_C, 0, 2);
+               ret = __crisv32_pinmux_dealloc(PORT_C, 8, 10);
+               ret |= __crisv32_pinmux_dealloc(PORT_C, 0, 2);
                hwprot.ata2 = regk_pinmux_no;
                break;
        case pinmux_ata:
-               ret = crisv32_pinmux_dealloc(PORT_B, 0, 15);
-               ret |= crisv32_pinmux_dealloc(PORT_D, 8, 15);
+               ret = __crisv32_pinmux_dealloc(PORT_B, 0, 15);
+               ret |= __crisv32_pinmux_dealloc(PORT_D, 8, 15);
                hwprot.ata = regk_pinmux_no;
                break;
        case pinmux_eth1:
-               ret = crisv32_pinmux_dealloc(PORT_E, 0, 17);
+               ret = __crisv32_pinmux_dealloc(PORT_E, 0, 17);
                hwprot.eth1 = regk_pinmux_no;
                hwprot.eth1_mgm = regk_pinmux_no;
                break;
        case pinmux_timer:
-               ret = crisv32_pinmux_dealloc(PORT_C, 16, 16);
+               ret = __crisv32_pinmux_dealloc(PORT_C, 16, 16);
                hwprot.timer = regk_pinmux_no;
                spin_unlock_irqrestore(&pinmux_lock, flags);
                return ret;
@@ -293,7 +306,8 @@ int crisv32_pinmux_dealloc_fixed(enum fixed_function function)
        return ret;
 }
 
-void crisv32_pinmux_dump(void)
+#ifdef DEBUG
+static void crisv32_pinmux_dump(void)
 {
        int i, j;
 
@@ -305,5 +319,5 @@ void crisv32_pinmux_dump(void)
                        printk(KERN_DEBUG "  Pin %d = %d\n", j, pins[i][j]);
        }
 }
-
+#endif
 __initcall(crisv32_pinmux_init);
index c2b3036779df5ad3b52e7d24b2c228398da3dc8e..09bf0c90d2d3d59ee924003ce963cc10d9b07328 100644 (file)
@@ -28,11 +28,9 @@ enum fixed_function {
   pinmux_timer
 };
 
-int crisv32_pinmux_init(void);
 int crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode);
 int crisv32_pinmux_alloc_fixed(enum fixed_function function);
 int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin);
 int crisv32_pinmux_dealloc_fixed(enum fixed_function function);
-void crisv32_pinmux_dump(void);
 
 #endif
index d5f124832fd1e0f337259d5467eb0cd286fb91dd..889f2de050a34e80e1cbd1424e8307a1f7442566 100644 (file)
@@ -1,8 +1,4 @@
 
-header-y += arch-v10/
-header-y += arch-v32/
-
-
 generic-y += barrier.h
 generic-y += clkdev.h
 generic-y += cputime.h
index 7d47b366ad82b49d2ea19e734b8d7be859f95080..01f66b8f15e50b83a75fcd9693d253348a459297 100644 (file)
@@ -1,8 +1,8 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
-header-y += arch-v10/
-header-y += arch-v32/
+header-y += ../arch-v10/arch/
+header-y += ../arch-v32/arch/
 header-y += auxvec.h
 header-y += bitsperlong.h
 header-y += byteorder.h
index 5868cee20ebd8bdb3940ba14ce55982ca8961e9a..3908b942fd4c216c878e66faa39e254e1032d017 100644 (file)
@@ -47,16 +47,16 @@ EXPORT_SYMBOL(__negdi2);
 EXPORT_SYMBOL(__ioremap);
 EXPORT_SYMBOL(iounmap);
 
-/* Userspace access functions */
-EXPORT_SYMBOL(__copy_user_zeroing);
-EXPORT_SYMBOL(__copy_user);
-
 #undef memcpy
 #undef memset
 extern void * memset(void *, int, __kernel_size_t);
 extern void * memcpy(void *, const void *, __kernel_size_t);
 EXPORT_SYMBOL(memcpy);
 EXPORT_SYMBOL(memset);
+#ifdef CONFIG_ETRAX_ARCH_V32
+#undef strcmp
+EXPORT_SYMBOL(strcmp);
+#endif
 
 #ifdef CONFIG_ETRAX_FAST_TIMER
 /* Fast timer functions */
@@ -66,3 +66,4 @@ EXPORT_SYMBOL(del_fast_timer);
 EXPORT_SYMBOL(schedule_usleep);
 #endif
 EXPORT_SYMBOL(csum_partial);
+EXPORT_SYMBOL(csum_partial_copy_from_user);
index 0ffda73734f538d762a5e8a996b2d5e0d669fb7c..da4c72401e277f66ccfea6661a654a9f4a5d585e 100644 (file)
 
 #include <linux/init.h>
 #include <linux/module.h>
+#include <linux/utsname.h>
+#ifdef CONFIG_KALLSYMS
+#include <linux/kallsyms.h>
+#endif
 
 #include <asm/pgtable.h>
 #include <asm/uaccess.h>
@@ -34,25 +38,24 @@ static int kstack_depth_to_print = 24;
 
 void (*nmi_handler)(struct pt_regs *);
 
-void
-show_trace(unsigned long *stack)
+void show_trace(unsigned long *stack)
 {
        unsigned long addr, module_start, module_end;
        extern char _stext, _etext;
        int i;
 
-       printk("\nCall Trace: ");
+       pr_err("\nCall Trace: ");
 
        i = 1;
        module_start = VMALLOC_START;
        module_end = VMALLOC_END;
 
-       while (((long)stack & (THREAD_SIZE-1)) != 0) {
+       while (((long)stack & (THREAD_SIZE - 1)) != 0) {
                if (__get_user(addr, stack)) {
                        /* This message matches "failing address" marked
                           s390 in ksymoops, so lines containing it will
                           not be filtered out by ksymoops.  */
-                       printk("Failing address 0x%lx\n", (unsigned long)stack);
+                       pr_err("Failing address 0x%lx\n", (unsigned long)stack);
                        break;
                }
                stack++;
@@ -68,10 +71,14 @@ show_trace(unsigned long *stack)
                if (((addr >= (unsigned long)&_stext) &&
                     (addr <= (unsigned long)&_etext)) ||
                    ((addr >= module_start) && (addr <= module_end))) {
+#ifdef CONFIG_KALLSYMS
+                       print_ip_sym(addr);
+#else
                        if (i && ((i % 8) == 0))
-                               printk("\n       ");
-                       printk("[<%08lx>] ", addr);
+                               pr_err("\n       ");
+                       pr_err("[<%08lx>] ", addr);
                        i++;
+#endif
                }
        }
 }
@@ -111,21 +118,21 @@ show_stack(struct task_struct *task, unsigned long *sp)
 
        stack = sp;
 
-       printk("\nStack from %08lx:\n       ", (unsigned long)stack);
+       pr_err("\nStack from %08lx:\n       ", (unsigned long)stack);
        for (i = 0; i < kstack_depth_to_print; i++) {
                if (((long)stack & (THREAD_SIZE-1)) == 0)
                        break;
                if (i && ((i % 8) == 0))
-                       printk("\n       ");
+                       pr_err("\n       ");
                if (__get_user(addr, stack)) {
                        /* This message matches "failing address" marked
                           s390 in ksymoops, so lines containing it will
                           not be filtered out by ksymoops.  */
-                       printk("Failing address 0x%lx\n", (unsigned long)stack);
+                       pr_err("Failing address 0x%lx\n", (unsigned long)stack);
                        break;
                }
                stack++;
-               printk("%08lx ", addr);
+               pr_err("%08lx ", addr);
        }
        show_trace(sp);
 }
@@ -139,33 +146,32 @@ show_stack(void)
        unsigned long *sp = (unsigned long *)rdusp();
        int i;
 
-       printk("Stack dump [0x%08lx]:\n", (unsigned long)sp);
+       pr_err("Stack dump [0x%08lx]:\n", (unsigned long)sp);
        for (i = 0; i < 16; i++)
-               printk("sp + %d: 0x%08lx\n", i*4, sp[i]);
+               pr_err("sp + %d: 0x%08lx\n", i*4, sp[i]);
        return 0;
 }
 #endif
 
-void
-set_nmi_handler(void (*handler)(struct pt_regs *))
+void set_nmi_handler(void (*handler)(struct pt_regs *))
 {
        nmi_handler = handler;
        arch_enable_nmi();
 }
 
 #ifdef CONFIG_DEBUG_NMI_OOPS
-void
-oops_nmi_handler(struct pt_regs *regs)
+void oops_nmi_handler(struct pt_regs *regs)
 {
        stop_watchdog();
        oops_in_progress = 1;
-       printk("NMI!\n");
+       pr_err("NMI!\n");
        show_registers(regs);
        oops_in_progress = 0;
+       oops_exit();
+       pr_err("\n"); /* Flush mtdoops.  */
 }
 
-static int __init
-oops_nmi_register(void)
+static int __init oops_nmi_register(void)
 {
        set_nmi_handler(oops_nmi_handler);
        return 0;
@@ -180,8 +186,7 @@ __initcall(oops_nmi_register);
  * similar to an Oops dump, and if the kernel is configured to be a nice
  * doggy, then halt instead of reboot.
  */
-void
-watchdog_bite_hook(struct pt_regs *regs)
+void watchdog_bite_hook(struct pt_regs *regs)
 {
 #ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
        local_irq_disable();
@@ -196,8 +201,7 @@ watchdog_bite_hook(struct pt_regs *regs)
 }
 
 /* This is normally the Oops function. */
-void
-die_if_kernel(const char *str, struct pt_regs *regs, long err)
+void die_if_kernel(const char *str, struct pt_regs *regs, long err)
 {
        if (user_mode(regs))
                return;
@@ -211,13 +215,17 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
        stop_watchdog();
 #endif
 
+       oops_enter();
        handle_BUG(regs);
 
-       printk("%s: %04lx\n", str, err & 0xffff);
+       pr_err("Linux %s %s\n", utsname()->release, utsname()->version);
+       pr_err("%s: %04lx\n", str, err & 0xffff);
 
        show_registers(regs);
 
+       oops_exit();
        oops_in_progress = 0;
+       pr_err("\n"); /* Flush mtdoops.  */
 
 #ifdef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
        reset_watchdog();
@@ -225,8 +233,7 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
        do_exit(SIGSEGV);
 }
 
-void __init
-trap_init(void)
+void __init trap_init(void)
 {
        /* Nothing needs to be done */
 }
index c81af5bd916792791ea05baf006270cf459fb26a..1e7fd45b60f82af0beefb2c609151a3879a2b6a7 100644 (file)
 #include <linux/gfp.h>
 #include <linux/init.h>
 #include <linux/bootmem.h>
+#include <linux/proc_fs.h>
+#include <linux/kcore.h>
 #include <asm/tlb.h>
 #include <asm/sections.h>
 
 unsigned long empty_zero_page;
+EXPORT_SYMBOL(empty_zero_page);
 
-void __init
-mem_init(void)
+void __init mem_init(void)
 {
        BUG_ON(!mem_map);
 
@@ -31,10 +33,36 @@ mem_init(void)
        mem_init_print_info(NULL);
 }
 
-/* free the pages occupied by initialization code */
+/* Free a range of init pages. Virtual addresses. */
 
-void 
-free_initmem(void)
+void free_init_pages(const char *what, unsigned long begin, unsigned long end)
+{
+       unsigned long addr;
+
+       for (addr = begin; addr < end; addr += PAGE_SIZE) {
+               ClearPageReserved(virt_to_page(addr));
+               init_page_count(virt_to_page(addr));
+               free_page(addr);
+               totalram_pages++;
+       }
+
+       printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
+}
+
+/* Free the pages occupied by initialization code. */
+
+void free_initmem(void)
 {
        free_initmem_default(-1);
 }
+
+/* Free the pages occupied by initrd code. */
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void free_initrd_mem(unsigned long start, unsigned long end)
+{
+       free_init_pages("initrd memory",
+                       start,
+                       end);
+}
+#endif
index f9ca44bdea20f6cd2031b146441f88cbfa594bec..80fdb995a8ce406abcfd207a5faccd28094a8636 100644 (file)
@@ -76,10 +76,11 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
  * Must be freed with iounmap.
  */
 
-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
         return __ioremap(phys_addr | MEM_NON_CACHEABLE, size, 0);
 }
+EXPORT_SYMBOL(ioremap_nocache);
 
 void iounmap(volatile void __iomem *addr)
 {
index 263511719a4ade5b6d81a563c92d3a5d368c2bb7..69952c18420708f4f5328021c7988aa1f285ed64 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Cache definitions for the Hexagon architecture
  *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2011,2014 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -25,6 +25,8 @@
 #define L1_CACHE_SHIFT         (5)
 #define L1_CACHE_BYTES         (1 << L1_CACHE_SHIFT)
 
+#define ARCH_DMA_MINALIGN      L1_CACHE_BYTES
+
 #define __cacheline_aligned    __aligned(L1_CACHE_BYTES)
 #define ____cacheline_aligned  __aligned(L1_CACHE_BYTES)
 
index 49e0896ec2401464f93ef32ba4f279d1ca88181c..b86f9f300e949fe86e28a83f9bc8702084006f1c 100644 (file)
 #ifndef _ASM_CACHEFLUSH_H
 #define _ASM_CACHEFLUSH_H
 
-#include <linux/cache.h>
-#include <linux/mm.h>
-#include <asm/string.h>
-#include <asm-generic/cacheflush.h>
+#include <linux/mm_types.h>
 
 /* Cache flushing:
  *
 #define LINESIZE       32
 #define LINEBITS       5
 
+#define flush_cache_all()                      do { } while (0)
+#define flush_cache_mm(mm)                     do { } while (0)
+#define flush_cache_dup_mm(mm)                 do { } while (0)
+#define flush_cache_range(vma, start, end)     do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn)     do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page)                        do { } while (0)
+#define flush_dcache_mmap_lock(mapping)                do { } while (0)
+#define flush_dcache_mmap_unlock(mapping)      do { } while (0)
+#define flush_icache_page(vma, pg)             do { } while (0)
+#define flush_icache_user_range(vma, pg, adr, len)     do { } while (0)
+#define flush_cache_vmap(start, end)           do { } while (0)
+#define flush_cache_vunmap(start, end)         do { } while (0)
+
 /*
  * Flush Dcache range through current map.
  */
@@ -49,7 +60,6 @@ extern void flush_dcache_range(unsigned long start, unsigned long end);
 /*
  * Flush Icache range through current map.
  */
-#undef flush_icache_range
 extern void flush_icache_range(unsigned long start, unsigned long end);
 
 /*
@@ -79,19 +89,11 @@ static inline void update_mmu_cache(struct vm_area_struct *vma,
        /*  generic_ptrace_pokedata doesn't wind up here, does it?  */
 }
 
-#undef copy_to_user_page
-static inline void copy_to_user_page(struct vm_area_struct *vma,
-                                            struct page *page,
-                                            unsigned long vaddr,
-                                            void *dst, void *src, int len)
-{
-       memcpy(dst, src, len);
-       if (vma->vm_flags & VM_EXEC) {
-               flush_icache_range((unsigned long) dst,
-               (unsigned long) dst + len);
-       }
-}
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+                      unsigned long vaddr, void *dst, void *src, int len);
 
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+       memcpy(dst, src, len)
 
 extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
 extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
index 70298996e9b2cbb69a4e05d863ce0c7fec6f90c2..66f5e9a61efca05782cd45d4fec819a4993cfb15 100644 (file)
 #ifdef __KERNEL__
 
 #include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/vmalloc.h>
-#include <asm/string.h>
-#include <asm/mem-layout.h>
 #include <asm/iomap.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
 
 /*
  * We don't have PCI yet.
index 0e7c1dbb37b2144f57432481ff7af9da560eb5cb..6981949f5df3c3b8a2adef85d99de7501636d045 100644 (file)
@@ -19,6 +19,7 @@
  */
 
 #include <linux/init.h>
+#include <linux/delay.h>
 #include <linux/bootmem.h>
 #include <linux/mmzone.h>
 #include <linux/mm.h>
index 7858663352b9ec1662959ad759a62ca07e3b639c..110dab152f82c28074ff4f807a6ce193fbc0afad 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Kernel traps/events for Hexagon processor
  *
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -423,7 +423,7 @@ void do_trap0(struct pt_regs *regs)
                         */
                        info.si_code = TRAP_BRKPT;
                        info.si_addr = (void __user *) pt_elr(regs);
-                       send_sig_info(SIGTRAP, &info, current);
+                       force_sig_info(SIGTRAP, &info, current);
                } else {
 #ifdef CONFIG_KGDB
                        kgdb_handle_exception(pt_cause(regs), SIGTRAP,
index 44d8c47bae2f6cc6b8ba9566e15ae7af718ad871..5f268c1071b3df8f56b4fd4871fbb1b5b19e8d72 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Linker script for Hexagon kernel
  *
- * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -59,7 +59,7 @@ SECTIONS
        INIT_DATA_SECTION(PAGE_SIZE)
 
        _sdata = .;
-               RW_DATA_SECTION(32,PAGE_SIZE,PAGE_SIZE)
+               RW_DATA_SECTION(32,PAGE_SIZE,_THREAD_SIZE)
                RO_DATA_SECTION(PAGE_SIZE)
        _edata = .;
 
index 0c76c802e31ce864a0374f3a397a7c403077fec0..a7c6d827d8b616632a32289dd2d3fb4a2e5c423a 100644 (file)
@@ -127,3 +127,13 @@ void flush_cache_all_hexagon(void)
        local_irq_restore(flags);
        mb();
 }
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+                      unsigned long vaddr, void *dst, void *src, int len)
+{
+       memcpy(dst, src, len);
+       if (vma->vm_flags & VM_EXEC) {
+               flush_icache_range((unsigned long) dst,
+               (unsigned long) dst + len);
+       }
+}
index 5905fd5f97f666fa9379c7860699fe9bf8807407..d27d67224046a9c87d6c5411d465e17ee104afc5 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <linux/io.h>
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
 
 void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
index 14aa1c58912b6ff70a62d35f77c052fa80ee2930..0ec484d2dcbcad7c175b2510871b10c63644108a 100644 (file)
@@ -35,8 +35,8 @@ extern void *per_cpu_init(void);
 
 /*
  * Be extremely careful when taking the address of this variable!  Due to virtual
- * remapping, it is different from the canonical address returned by __get_cpu_var(var)!
- * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
+ * remapping, it is different from the canonical address returned by this_cpu_ptr(&var)!
+ * On the positive side, using __ia64_per_cpu_var() instead of this_cpu_ptr() is slightly
  * more efficient.
  */
 #define __ia64_per_cpu_var(var) (*({                                   \
index 203e4403c366869ed1acbd99cbf4e82c92b83942..48a9dfc55b51aa4a3819bae6c686b0cd5f38a7f0 100644 (file)
@@ -374,7 +374,7 @@ static long alchemy_calc_div(unsigned long rate, unsigned long prate,
 
 static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate,
                                        unsigned long *best_parent_rate,
-                                       struct clk **best_parent_clk,
+                                       struct clk_hw **best_parent_clk,
                                        int scale, int maxdiv)
 {
        struct clk *pc, *bpc, *free;
@@ -453,7 +453,7 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate,
        }
 
        *best_parent_rate = bpr;
-       *best_parent_clk = bpc;
+       *best_parent_clk = __clk_get_hw(bpc);
        return br;
 }
 
@@ -547,7 +547,7 @@ static unsigned long alchemy_clk_fgv1_recalc(struct clk_hw *hw,
 
 static long alchemy_clk_fgv1_detr(struct clk_hw *hw, unsigned long rate,
                                        unsigned long *best_parent_rate,
-                                       struct clk **best_parent_clk)
+                                       struct clk_hw **best_parent_clk)
 {
        return alchemy_clk_fgcs_detr(hw, rate, best_parent_rate,
                                     best_parent_clk, 2, 512);
@@ -679,7 +679,7 @@ static unsigned long alchemy_clk_fgv2_recalc(struct clk_hw *hw,
 
 static long alchemy_clk_fgv2_detr(struct clk_hw *hw, unsigned long rate,
                                        unsigned long *best_parent_rate,
-                                       struct clk **best_parent_clk)
+                                       struct clk_hw **best_parent_clk)
 {
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        int scale, maxdiv;
@@ -898,7 +898,7 @@ static int alchemy_clk_csrc_setr(struct clk_hw *hw, unsigned long rate,
 
 static long alchemy_clk_csrc_detr(struct clk_hw *hw, unsigned long rate,
                                        unsigned long *best_parent_rate,
-                                       struct clk **best_parent_clk)
+                                       struct clk_hw **best_parent_clk)
 {
        struct alchemy_fgcs_clk *c = to_fgcs_clk(hw);
        int scale = c->dt[2] == 3 ? 1 : 2; /* au1300 check */
index 46e8f7676a15a0edadb3729cc7b8c6aebbd4d04b..3bdb72a7036491b18e5080f8a2fee75f9a22a5b8 100644 (file)
@@ -36,7 +36,7 @@ CONFIG_PCI=y
 CONFIG_PCI_REALLOC_ENABLE_AUTO=y
 CONFIG_PCCARD=y
 CONFIG_PCMCIA_ALCHEMY_DEVBOARD=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_PACKET_DIAG=y
index 227a9de32246d17553080d4c587b7615c1669c62..e51aad9a94b15e9199607df4a988bf236573f9a3 100644 (file)
@@ -37,7 +37,6 @@ CONFIG_MIPS32_N32=y
 CONFIG_PM=y
 CONFIG_HIBERNATION=y
 CONFIG_PM_STD_PARTITION="/dev/hda3"
-CONFIG_PM_RUNTIME=y
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_DEBUG=y
 CONFIG_CPU_FREQ_STAT=m
index 1c6191ebd583cef098f9af252c6cece585e5fd51..7eabcd2031ea884bba3de32fc9cf944d61326ccc 100644 (file)
@@ -58,7 +58,7 @@ CONFIG_BINFMT_MISC=m
 CONFIG_MIPS32_COMPAT=y
 CONFIG_MIPS32_O32=y
 CONFIG_MIPS32_N32=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
index 70509a48df826f3e43545b937737b295095e28e5..b3d1d37f85eace0778eaa57c3f8aa616aebf531c 100644 (file)
@@ -61,7 +61,7 @@ CONFIG_BINFMT_MISC=y
 CONFIG_MIPS32_COMPAT=y
 CONFIG_MIPS32_O32=y
 CONFIG_MIPS32_N32=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_PM_DEBUG=y
 CONFIG_NET=y
 CONFIG_PACKET=y
index 82207e8079f32696b9b179cf1bfc34242e4035ee..3d8016d6cf3eb143ff8b262e8a8de099fb41863a 100644 (file)
@@ -41,7 +41,7 @@ CONFIG_PCI=y
 CONFIG_PCI_MSI=y
 CONFIG_PCI_DEBUG=y
 CONFIG_BINFMT_MISC=m
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_PM_DEBUG=y
 CONFIG_NET=y
 CONFIG_PACKET=y
index 7cba480568c8f5d449b3b44870a515456f8441b2..70795a67a27622df94a6dcac714e930d7bf85557 100644 (file)
@@ -30,7 +30,7 @@ retry:
 
        return pte;
 #else
-       return ACCESS_ONCE(*ptep);
+       return READ_ONCE(*ptep);
 #endif
 }
 
index d2d11b7055ba640178817917d04c46364453c069..8121aa6db2ff21ad37510879dd82a3fe7ba7fa29 100644 (file)
 
 #endif /*!CONFIG_PA20*/
 
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.  */
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
+   We don't explicitly expose that "*a" may be written as reload
+   fails to find a register in class R1_REGS when "a" needs to be
+   reloaded when generating 64-bit PIC code.  Instead, we clobber
+   memory to indicate to the compiler that the assembly code reads
+   or writes to items other than those listed in the input and output
+   operands.  This may pessimize the code somewhat but __ldcw is
+   usually used within code blocks surrounded by memory barriers.  */
 #define __ldcw(a) ({                                           \
        unsigned __ret;                                         \
-       __asm__ __volatile__(__LDCW " 0(%2),%0"                 \
-               : "=r" (__ret), "+m" (*(a)) : "r" (a));         \
+       __asm__ __volatile__(__LDCW " 0(%1),%0"                 \
+               : "=r" (__ret) : "r" (a) : "memory");           \
        __ret;                                                  \
 })
 
index 2e637c881d2b44be41b9db66145ac7863cbe1be1..879de5efb073e3d0d075a924f139d73ff0830324 100644 (file)
@@ -36,7 +36,7 @@ CONFIG_KEXEC=y
 CONFIG_SCHED_SMT=y
 CONFIG_CMDLINE_BOOL=y
 CONFIG_CMDLINE=""
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_PM_DEBUG=y
 # CONFIG_SECCOMP is not set
 # CONFIG_PCI is not set
diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h
new file mode 100644 (file)
index 0000000..d2f99ca
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef _ASM_POWERPC_CPUIDLE_H
+#define _ASM_POWERPC_CPUIDLE_H
+
+#ifdef CONFIG_PPC_POWERNV
+/* Used in powernv idle state management */
+#define PNV_THREAD_RUNNING              0
+#define PNV_THREAD_NAP                  1
+#define PNV_THREAD_SLEEP                2
+#define PNV_THREAD_WINKLE               3
+#define PNV_CORE_IDLE_LOCK_BIT          0x100
+#define PNV_CORE_IDLE_THREAD_BITS       0x0FF
+
+#ifndef __ASSEMBLY__
+extern u32 pnv_fastsleep_workaround_at_entry[];
+extern u32 pnv_fastsleep_workaround_at_exit[];
+#endif
+
+#endif
+
+#endif
index 5cd8d2fddba94c6e5ef8b363686989e8fcb9df16..eb95b675109b64c47b58a29673c0fc5bc4cfaa46 100644 (file)
@@ -56,6 +56,14 @@ struct opal_sg_list {
 #define OPAL_HARDWARE_FROZEN   -13
 #define OPAL_WRONG_STATE       -14
 #define OPAL_ASYNC_COMPLETION  -15
+#define OPAL_I2C_TIMEOUT       -17
+#define OPAL_I2C_INVALID_CMD   -18
+#define OPAL_I2C_LBUS_PARITY   -19
+#define OPAL_I2C_BKEND_OVERRUN -20
+#define OPAL_I2C_BKEND_ACCESS  -21
+#define OPAL_I2C_ARBT_LOST     -22
+#define OPAL_I2C_NACK_RCVD     -23
+#define OPAL_I2C_STOP_ERR      -24
 
 /* API Tokens (in r0) */
 #define OPAL_INVALID_CALL                      -1
@@ -152,12 +160,25 @@ struct opal_sg_list {
 #define OPAL_PCI_ERR_INJECT                    96
 #define OPAL_PCI_EEH_FREEZE_SET                        97
 #define OPAL_HANDLE_HMI                                98
+#define OPAL_CONFIG_CPU_IDLE_STATE             99
+#define OPAL_SLW_SET_REG                       100
 #define OPAL_REGISTER_DUMP_REGION              101
 #define OPAL_UNREGISTER_DUMP_REGION            102
 #define OPAL_WRITE_TPO                         103
 #define OPAL_READ_TPO                          104
 #define OPAL_IPMI_SEND                         107
 #define OPAL_IPMI_RECV                         108
+#define OPAL_I2C_REQUEST                       109
+
+/* Device tree flags */
+
+/* Flags set in power-mgmt nodes in device tree if
+ * respective idle states are supported in the platform.
+ */
+#define OPAL_PM_NAP_ENABLED    0x00010000
+#define OPAL_PM_SLEEP_ENABLED  0x00020000
+#define OPAL_PM_WINKLE_ENABLED 0x00040000
+#define OPAL_PM_SLEEP_ENABLED_ER1      0x00080000
 
 #ifndef __ASSEMBLY__
 
@@ -712,6 +733,24 @@ typedef struct oppanel_line {
        uint64_t        line_len;
 } oppanel_line_t;
 
+/* OPAL I2C request */
+struct opal_i2c_request {
+       uint8_t type;
+#define OPAL_I2C_RAW_READ      0
+#define OPAL_I2C_RAW_WRITE     1
+#define OPAL_I2C_SM_READ       2
+#define OPAL_I2C_SM_WRITE      3
+       uint8_t flags;
+#define OPAL_I2C_ADDR_10       0x01    /* Not supported yet */
+       uint8_t subaddr_sz;             /* Max 4 */
+       uint8_t reserved;
+       __be16 addr;                    /* 7 or 10 bit address */
+       __be16 reserved2;
+       __be32 subaddr;         /* Sub-address if any */
+       __be32 size;                    /* Data size */
+       __be64 buffer_ra;               /* Buffer real address */
+};
+
 /* /sys/firmware/opal */
 extern struct kobject *opal_kobj;
 
@@ -876,11 +915,14 @@ int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
 int64_t opal_handle_hmi(void);
 int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end);
 int64_t opal_unregister_dump_region(uint32_t id);
+int64_t opal_slw_set_reg(uint64_t cpu_pir, uint64_t sprn, uint64_t val);
 int64_t opal_pci_set_phb_cxl_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number);
 int64_t opal_ipmi_send(uint64_t interface, struct opal_ipmi_msg *msg,
                uint64_t msg_len);
 int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *msg,
                uint64_t *msg_len);
+int64_t opal_i2c_request(uint64_t async_token, uint32_t bus_id,
+                        struct opal_i2c_request *oreq);
 
 /* Internal functions */
 extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
index 24a386cbb928b008dde32a71ba244b3c0b6aecbb..e5f22c6c4bf9dbbabec793b4293a92b854bafab0 100644 (file)
@@ -152,6 +152,16 @@ struct paca_struct {
        u64 tm_scratch;                 /* TM scratch area for reclaim */
 #endif
 
+#ifdef CONFIG_PPC_POWERNV
+       /* Per-core mask tracking idle threads and a lock bit-[L][TTTTTTTT] */
+       u32 *core_idle_state_ptr;
+       u8 thread_idle_state;           /* PNV_THREAD_RUNNING/NAP/SLEEP */
+       /* Mask to indicate thread id in core */
+       u8 thread_mask;
+       /* Mask to denote subcore sibling threads */
+       u8 subcore_sibling_mask;
+#endif
+
 #ifdef CONFIG_PPC_BOOK3S_64
        /* Exclusive emergency stack pointer for machine check exception. */
        void *mc_emergency_sp;
index 1a5287759fc86cdd1918abfcf7b2645031ac77c6..03cd858a401c067b1b34f18e9b5fddd521e8fe2d 100644 (file)
 
 #define PPC_INST_NAP                   0x4c000364
 #define PPC_INST_SLEEP                 0x4c0003a4
+#define PPC_INST_WINKLE                        0x4c0003e4
 
 /* A2 specific instructions */
 #define PPC_INST_ERATWE                        0x7c0001a6
 
 #define PPC_NAP                        stringify_in_c(.long PPC_INST_NAP)
 #define PPC_SLEEP              stringify_in_c(.long PPC_INST_SLEEP)
+#define PPC_WINKLE             stringify_in_c(.long PPC_INST_WINKLE)
 
 /* BHRB instructions */
 #define PPC_CLRBHRB            stringify_in_c(.long PPC_INST_CLRBHRB)
index 29c3798cf8000fad0c5124c5aa09577aa28d8f71..bf117d8fb45fe773bb6154578961dd031c921f28 100644 (file)
@@ -452,7 +452,8 @@ enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
 
 extern int powersave_nap;      /* set if nap mode can be used in idle loop */
 extern unsigned long power7_nap(int check_irq);
-extern void power7_sleep(void);
+extern unsigned long power7_sleep(void);
+extern unsigned long power7_winkle(void);
 extern void flush_instruction_cache(void);
 extern void hard_reset_now(void);
 extern void poweroff_now(void);
index c998279bd85b1b5cec6cf719b88fadc90aacd79f..1c874fb533bbf22fe8ff019b328ae623214243bb 100644 (file)
 #define __MSR          (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV)
 #ifdef __BIG_ENDIAN__
 #define MSR_           __MSR
+#define MSR_IDLE       (MSR_ME | MSR_SF | MSR_HV)
 #else
 #define MSR_           (__MSR | MSR_LE)
+#define MSR_IDLE       (MSR_ME | MSR_SF | MSR_HV | MSR_LE)
 #endif
 #define MSR_KERNEL     (MSR_ | MSR_64BIT)
 #define MSR_USER32     (MSR_ | MSR_PR | MSR_EE)
 #define SPRN_DBAT7L    0x23F   /* Data BAT 7 Lower Register */
 #define SPRN_DBAT7U    0x23E   /* Data BAT 7 Upper Register */
 #define SPRN_PPR       0x380   /* SMT Thread status Register */
+#define SPRN_TSCR      0x399   /* Thread Switch Control Register */
 
 #define SPRN_DEC       0x016           /* Decrement Register */
 #define SPRN_DER       0x095           /* Debug Enable Regsiter */
 #define SPRN_BESCR     806     /* Branch event status and control register */
 #define   BESCR_GE     0x8000000000000000ULL /* Global Enable */
 #define SPRN_WORT      895     /* Workload optimization register - thread */
+#define SPRN_WORC      863     /* Workload optimization register - core */
 
 #define SPRN_PMC1      787
 #define SPRN_PMC2      788
index 6240698fee9a60fbbbf5403c10f710a5c41b861b..ff21b7a2f0cc581686a758bd38adf0bac0fdf24b 100644 (file)
@@ -90,6 +90,10 @@ static inline void syscall_set_arguments(struct task_struct *task,
 
 static inline int syscall_get_arch(void)
 {
-       return is_32bit_task() ? AUDIT_ARCH_PPC : AUDIT_ARCH_PPC64;
+       int arch = is_32bit_task() ? AUDIT_ARCH_PPC : AUDIT_ARCH_PPC64;
+#ifdef __LITTLE_ENDIAN__
+       arch |= __AUDIT_ARCH_LE;
+#endif
+       return arch;
 }
 #endif /* _ASM_SYSCALL_H */
index 9485b43a7c00ec52d2c8be21738d6b53878babc6..a0c071d24e0e5e744969bc2eeea473b210bde785 100644 (file)
@@ -284,7 +284,7 @@ do {                                                                \
        if (!is_kernel_addr((unsigned long)__gu_addr))          \
                might_fault();                                  \
        __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
-       (x) = (__typeof__(*(ptr)))__gu_val;                     \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                               \
 })
 #endif /* __powerpc64__ */
@@ -297,7 +297,7 @@ do {                                                                \
        might_fault();                                                  \
        if (access_ok(VERIFY_READ, __gu_addr, (size)))                  \
                __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
-       (x) = (__typeof__(*(ptr)))__gu_val;                             \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                             \
        __gu_err;                                                       \
 })
 
@@ -308,7 +308,7 @@ do {                                                                \
        const __typeof__(*(ptr)) __user *__gu_addr = (ptr);     \
        __chk_user_ptr(ptr);                                    \
        __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
-       (x) = (__typeof__(*(ptr)))__gu_val;                     \
+       (x) = (__force __typeof__(*(ptr)))__gu_val;                     \
        __gu_err;                                               \
 })
 
index 24d78e1871c9d5e9855e9aaddbf867d187fd697b..e624f9646350ccfadf32c18cbe4c4517190d2072 100644 (file)
@@ -726,5 +726,16 @@ int main(void)
                                        arch.timing_last_enter.tv32.tbl));
 #endif
 
+#ifdef CONFIG_PPC_POWERNV
+       DEFINE(PACA_CORE_IDLE_STATE_PTR,
+                       offsetof(struct paca_struct, core_idle_state_ptr));
+       DEFINE(PACA_THREAD_IDLE_STATE,
+                       offsetof(struct paca_struct, thread_idle_state));
+       DEFINE(PACA_THREAD_MASK,
+                       offsetof(struct paca_struct, thread_mask));
+       DEFINE(PACA_SUBCORE_SIBLING_MASK,
+                       offsetof(struct paca_struct, subcore_sibling_mask));
+#endif
+
        return 0;
 }
index db08382e19f1ab89592dbebe4a97af1d9d8857bb..c2df8150bd7a0425fc00ebcc78d784d6b280c146 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/hw_irq.h>
 #include <asm/exception-64s.h>
 #include <asm/ptrace.h>
+#include <asm/cpuidle.h>
 
 /*
  * We layout physical memory as follows:
@@ -101,23 +102,34 @@ system_reset_pSeries:
 #ifdef CONFIG_PPC_P7_NAP
 BEGIN_FTR_SECTION
        /* Running native on arch 2.06 or later, check if we are
-        * waking up from nap. We only handle no state loss and
-        * supervisor state loss. We do -not- handle hypervisor
-        * state loss at this time.
+        * waking up from nap/sleep/winkle.
         */
        mfspr   r13,SPRN_SRR1
        rlwinm. r13,r13,47-31,30,31
        beq     9f
 
-       /* waking up from powersave (nap) state */
-       cmpwi   cr1,r13,2
-       /* Total loss of HV state is fatal, we could try to use the
-        * PIR to locate a PACA, then use an emergency stack etc...
-        * OPAL v3 based powernv platforms have new idle states
-        * which fall in this catagory.
+       cmpwi   cr3,r13,2
+
+       /*
+        * Check if last bit of HSPGR0 is set. This indicates whether we are
+        * waking up from winkle.
         */
-       bgt     cr1,8f
        GET_PACA(r13)
+       clrldi  r5,r13,63
+       clrrdi  r13,r13,1
+       cmpwi   cr4,r5,1
+       mtspr   SPRN_HSPRG0,r13
+
+       lbz     r0,PACA_THREAD_IDLE_STATE(r13)
+       cmpwi   cr2,r0,PNV_THREAD_NAP
+       bgt     cr2,8f                          /* Either sleep or Winkle */
+
+       /* Waking up from nap should not cause hypervisor state loss */
+       bgt     cr3,.
+
+       /* Waking up from nap */
+       li      r0,PNV_THREAD_RUNNING
+       stb     r0,PACA_THREAD_IDLE_STATE(r13)  /* Clear thread state */
 
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        li      r0,KVM_HWTHREAD_IN_KERNEL
@@ -133,7 +145,7 @@ BEGIN_FTR_SECTION
 
        /* Return SRR1 from power7_nap() */
        mfspr   r3,SPRN_SRR1
-       beq     cr1,2f
+       beq     cr3,2f
        b       power7_wakeup_noloss
 2:     b       power7_wakeup_loss
 
@@ -1382,6 +1394,7 @@ machine_check_handle_early:
        MACHINE_CHECK_HANDLER_WINDUP
        GET_PACA(r13)
        ld      r1,PACAR1(r13)
+       li      r3,PNV_THREAD_NAP
        b       power7_enter_nap_mode
 4:
 #endif
index 18c0687e5ab3d2b9c4dc13587af411c755eef776..05adc8bbdef853b4c7adb27c5da88ceada1ea033 100644 (file)
 #include <asm/hw_irq.h>
 #include <asm/kvm_book3s_asm.h>
 #include <asm/opal.h>
+#include <asm/cpuidle.h>
+#include <asm/mmu-hash64.h>
 
 #undef DEBUG
 
+/*
+ * Use unused space in the interrupt stack to save and restore
+ * registers for winkle support.
+ */
+#define _SDR1  GPR3
+#define _RPR   GPR4
+#define _SPURR GPR5
+#define _PURR  GPR6
+#define _TSCR  GPR7
+#define _DSCR  GPR8
+#define _AMOR  GPR9
+#define _WORT  GPR10
+#define _WORC  GPR11
+
 /* Idle state entry routines */
 
 #define        IDLE_STATE_ENTER_SEQ(IDLE_INST)                         \
@@ -37,8 +53,7 @@
 
 /*
  * Pass requested state in r3:
- *     0 - nap
- *     1 - sleep
+ *     r3 - PNV_THREAD_NAP/SLEEP/WINKLE
  *
  * To check IRQ_HAPPENED in r4
  *     0 - don't check
@@ -101,18 +116,105 @@ _GLOBAL(power7_powersave_common)
        std     r9,_MSR(r1)
        std     r1,PACAR1(r13)
 
-_GLOBAL(power7_enter_nap_mode)
+       /*
+        * Go to real mode to do the nap, as required by the architecture.
+        * Also, we need to be in real mode before setting hwthread_state,
+        * because as soon as we do that, another thread can switch
+        * the MMU context to the guest.
+        */
+       LOAD_REG_IMMEDIATE(r5, MSR_IDLE)
+       li      r6, MSR_RI
+       andc    r6, r9, r6
+       LOAD_REG_ADDR(r7, power7_enter_nap_mode)
+       mtmsrd  r6, 1           /* clear RI before setting SRR0/1 */
+       mtspr   SPRN_SRR0, r7
+       mtspr   SPRN_SRR1, r5
+       rfid
+
+       .globl  power7_enter_nap_mode
+power7_enter_nap_mode:
 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        /* Tell KVM we're napping */
        li      r4,KVM_HWTHREAD_IN_NAP
        stb     r4,HSTATE_HWTHREAD_STATE(r13)
 #endif
-       cmpwi   cr0,r3,1
-       beq     2f
+       stb     r3,PACA_THREAD_IDLE_STATE(r13)
+       cmpwi   cr3,r3,PNV_THREAD_SLEEP
+       bge     cr3,2f
        IDLE_STATE_ENTER_SEQ(PPC_NAP)
        /* No return */
-2:     IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
-       /* No return */
+2:
+       /* Sleep or winkle */
+       lbz     r7,PACA_THREAD_MASK(r13)
+       ld      r14,PACA_CORE_IDLE_STATE_PTR(r13)
+lwarx_loop1:
+       lwarx   r15,0,r14
+       andc    r15,r15,r7                      /* Clear thread bit */
+
+       andi.   r15,r15,PNV_CORE_IDLE_THREAD_BITS
+
+/*
+ * If cr0 = 0, then current thread is the last thread of the core entering
+ * sleep. Last thread needs to execute the hardware bug workaround code if
+ * required by the platform.
+ * Make the workaround call unconditionally here. The below branch call is
+ * patched out when the idle states are discovered if the platform does not
+ * require it.
+ */
+.global pnv_fastsleep_workaround_at_entry
+pnv_fastsleep_workaround_at_entry:
+       beq     fastsleep_workaround_at_entry
+
+       stwcx.  r15,0,r14
+       bne-    lwarx_loop1
+       isync
+
+common_enter: /* common code for all the threads entering sleep or winkle */
+       bgt     cr3,enter_winkle
+       IDLE_STATE_ENTER_SEQ(PPC_SLEEP)
+
+fastsleep_workaround_at_entry:
+       ori     r15,r15,PNV_CORE_IDLE_LOCK_BIT
+       stwcx.  r15,0,r14
+       bne-    lwarx_loop1
+       isync
+
+       /* Fast sleep workaround */
+       li      r3,1
+       li      r4,1
+       li      r0,OPAL_CONFIG_CPU_IDLE_STATE
+       bl      opal_call_realmode
+
+       /* Clear Lock bit */
+       li      r0,0
+       lwsync
+       stw     r0,0(r14)
+       b       common_enter
+
+enter_winkle:
+       /*
+        * Note all register i.e per-core, per-subcore or per-thread is saved
+        * here since any thread in the core might wake up first
+        */
+       mfspr   r3,SPRN_SDR1
+       std     r3,_SDR1(r1)
+       mfspr   r3,SPRN_RPR
+       std     r3,_RPR(r1)
+       mfspr   r3,SPRN_SPURR
+       std     r3,_SPURR(r1)
+       mfspr   r3,SPRN_PURR
+       std     r3,_PURR(r1)
+       mfspr   r3,SPRN_TSCR
+       std     r3,_TSCR(r1)
+       mfspr   r3,SPRN_DSCR
+       std     r3,_DSCR(r1)
+       mfspr   r3,SPRN_AMOR
+       std     r3,_AMOR(r1)
+       mfspr   r3,SPRN_WORT
+       std     r3,_WORT(r1)
+       mfspr   r3,SPRN_WORC
+       std     r3,_WORC(r1)
+       IDLE_STATE_ENTER_SEQ(PPC_WINKLE)
 
 _GLOBAL(power7_idle)
        /* Now check if user or arch enabled NAP mode */
@@ -125,48 +227,21 @@ _GLOBAL(power7_idle)
 
 _GLOBAL(power7_nap)
        mr      r4,r3
-       li      r3,0
+       li      r3,PNV_THREAD_NAP
        b       power7_powersave_common
        /* No return */
 
 _GLOBAL(power7_sleep)
-       li      r3,1
+       li      r3,PNV_THREAD_SLEEP
        li      r4,1
        b       power7_powersave_common
        /* No return */
 
-/*
- * Make opal call in realmode. This is a generic function to be called
- * from realmode from reset vector. It handles endianess.
- *
- * r13 - paca pointer
- * r1  - stack pointer
- * r3  - opal token
- */
-opal_call_realmode:
-       mflr    r12
-       std     r12,_LINK(r1)
-       ld      r2,PACATOC(r13)
-       /* Set opal return address */
-       LOAD_REG_ADDR(r0,return_from_opal_call)
-       mtlr    r0
-       /* Handle endian-ness */
-       li      r0,MSR_LE
-       mfmsr   r12
-       andc    r12,r12,r0
-       mtspr   SPRN_HSRR1,r12
-       mr      r0,r3                   /* Move opal token to r0 */
-       LOAD_REG_ADDR(r11,opal)
-       ld      r12,8(r11)
-       ld      r2,0(r11)
-       mtspr   SPRN_HSRR0,r12
-       hrfid
-
-return_from_opal_call:
-       FIXUP_ENDIAN
-       ld      r0,_LINK(r1)
-       mtlr    r0
-       blr
+_GLOBAL(power7_winkle)
+       li      r3,PNV_THREAD_WINKLE
+       li      r4,1
+       b       power7_powersave_common
+       /* No return */
 
 #define CHECK_HMI_INTERRUPT                                            \
        mfspr   r0,SPRN_SRR1;                                           \
@@ -181,7 +256,7 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);            \
        ld      r2,PACATOC(r13);                                        \
        ld      r1,PACAR1(r13);                                         \
        std     r3,ORIG_GPR3(r1);       /* Save original r3 */          \
-       li      r3,OPAL_HANDLE_HMI;     /* Pass opal token argument*/   \
+       li      r0,OPAL_HANDLE_HMI;     /* Pass opal token argument*/   \
        bl      opal_call_realmode;                                     \
        ld      r3,ORIG_GPR3(r1);       /* Restore original r3 */       \
 20:    nop;
@@ -190,16 +265,190 @@ ALT_FTR_SECTION_END_NESTED_IFSET(CPU_FTR_ARCH_207S, 66);         \
 _GLOBAL(power7_wakeup_tb_loss)
        ld      r2,PACATOC(r13);
        ld      r1,PACAR1(r13)
+       /*
+        * Before entering any idle state, the NVGPRs are saved in the stack
+        * and they are restored before switching to the process context. Hence
+        * until they are restored, they are free to be used.
+        *
+        * Save SRR1 in a NVGPR as it might be clobbered in opal_call_realmode
+        * (called in CHECK_HMI_INTERRUPT). SRR1 is required to determine the
+        * wakeup reason if we branch to kvm_start_guest.
+        */
 
+       mfspr   r16,SPRN_SRR1
 BEGIN_FTR_SECTION
        CHECK_HMI_INTERRUPT
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+
+       lbz     r7,PACA_THREAD_MASK(r13)
+       ld      r14,PACA_CORE_IDLE_STATE_PTR(r13)
+lwarx_loop2:
+       lwarx   r15,0,r14
+       andi.   r9,r15,PNV_CORE_IDLE_LOCK_BIT
+       /*
+        * Lock bit is set in one of the 2 cases-
+        * a. In the sleep/winkle enter path, the last thread is executing
+        * fastsleep workaround code.
+        * b. In the wake up path, another thread is executing fastsleep
+        * workaround undo code or resyncing timebase or restoring context
+        * In either case loop until the lock bit is cleared.
+        */
+       bne     core_idle_lock_held
+
+       cmpwi   cr2,r15,0
+       lbz     r4,PACA_SUBCORE_SIBLING_MASK(r13)
+       and     r4,r4,r15
+       cmpwi   cr1,r4,0        /* Check if first in subcore */
+
+       /*
+        * At this stage
+        * cr1 - 0b0100 if first thread to wakeup in subcore
+        * cr2 - 0b0100 if first thread to wakeup in core
+        * cr3 - 0b0010 if waking up from sleep or winkle
+        * cr4 - 0b0100 if waking up from winkle
+        */
+
+       or      r15,r15,r7              /* Set thread bit */
+
+       beq     cr1,first_thread_in_subcore
+
+       /* Not first thread in subcore to wake up */
+       stwcx.  r15,0,r14
+       bne-    lwarx_loop2
+       isync
+       b       common_exit
+
+core_idle_lock_held:
+       HMT_LOW
+core_idle_lock_loop:
+       lwz     r15,0(r14)
+       andi.   r9,r15,PNV_CORE_IDLE_LOCK_BIT
+       bne     core_idle_lock_loop
+       HMT_MEDIUM
+       b       lwarx_loop2
+
+first_thread_in_subcore:
+       /* First thread in subcore to wakeup */
+       ori     r15,r15,PNV_CORE_IDLE_LOCK_BIT
+       stwcx.  r15,0,r14
+       bne-    lwarx_loop2
+       isync
+
+       /*
+        * If waking up from sleep, subcore state is not lost. Hence
+        * skip subcore state restore
+        */
+       bne     cr4,subcore_state_restored
+
+       /* Restore per-subcore state */
+       ld      r4,_SDR1(r1)
+       mtspr   SPRN_SDR1,r4
+       ld      r4,_RPR(r1)
+       mtspr   SPRN_RPR,r4
+       ld      r4,_AMOR(r1)
+       mtspr   SPRN_AMOR,r4
+
+subcore_state_restored:
+       /*
+        * Check if the thread is also the first thread in the core. If not,
+        * skip to clear_lock.
+        */
+       bne     cr2,clear_lock
+
+first_thread_in_core:
+
+       /*
+        * First thread in the core waking up from fastsleep. It needs to
+        * call the fastsleep workaround code if the platform requires it.
+        * Call it unconditionally here. The below branch instruction will
+        * be patched out when the idle states are discovered if platform
+        * does not require workaround.
+        */
+.global pnv_fastsleep_workaround_at_exit
+pnv_fastsleep_workaround_at_exit:
+       b       fastsleep_workaround_at_exit
+
+timebase_resync:
+       /* Do timebase resync if we are waking up from sleep. Use cr3 value
+        * set in exceptions-64s.S */
+       ble     cr3,clear_lock
        /* Time base re-sync */
-       li      r3,OPAL_RESYNC_TIMEBASE
+       li      r0,OPAL_RESYNC_TIMEBASE
        bl      opal_call_realmode;
-
        /* TODO: Check r3 for failure */
 
+       /*
+        * If waking up from sleep, per core state is not lost, skip to
+        * clear_lock.
+        */
+       bne     cr4,clear_lock
+
+       /* Restore per core state */
+       ld      r4,_TSCR(r1)
+       mtspr   SPRN_TSCR,r4
+       ld      r4,_WORC(r1)
+       mtspr   SPRN_WORC,r4
+
+clear_lock:
+       andi.   r15,r15,PNV_CORE_IDLE_THREAD_BITS
+       lwsync
+       stw     r15,0(r14)
+
+common_exit:
+       /*
+        * Common to all threads.
+        *
+        * If waking up from sleep, hypervisor state is not lost. Hence
+        * skip hypervisor state restore.
+        */
+       bne     cr4,hypervisor_state_restored
+
+       /* Waking up from winkle */
+
+       /* Restore per thread state */
+       bl      __restore_cpu_power8
+
+       /* Restore SLB  from PACA */
+       ld      r8,PACA_SLBSHADOWPTR(r13)
+
+       .rept   SLB_NUM_BOLTED
+       li      r3, SLBSHADOW_SAVEAREA
+       LDX_BE  r5, r8, r3
+       addi    r3, r3, 8
+       LDX_BE  r6, r8, r3
+       andis.  r7,r5,SLB_ESID_V@h
+       beq     1f
+       slbmte  r6,r5
+1:     addi    r8,r8,16
+       .endr
+
+       ld      r4,_SPURR(r1)
+       mtspr   SPRN_SPURR,r4
+       ld      r4,_PURR(r1)
+       mtspr   SPRN_PURR,r4
+       ld      r4,_DSCR(r1)
+       mtspr   SPRN_DSCR,r4
+       ld      r4,_WORT(r1)
+       mtspr   SPRN_WORT,r4
+
+hypervisor_state_restored:
+
+       li      r5,PNV_THREAD_RUNNING
+       stb     r5,PACA_THREAD_IDLE_STATE(r13)
+
+       mtspr   SPRN_SRR1,r16
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+       li      r0,KVM_HWTHREAD_IN_KERNEL
+       stb     r0,HSTATE_HWTHREAD_STATE(r13)
+       /* Order setting hwthread_state vs. testing hwthread_req */
+       sync
+       lbz     r0,HSTATE_HWTHREAD_REQ(r13)
+       cmpwi   r0,0
+       beq     6f
+       b       kvm_start_guest
+6:
+#endif
+
        REST_NVGPRS(r1)
        REST_GPR(2, r1)
        ld      r3,_CCR(r1)
@@ -212,6 +461,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
        mtspr   SPRN_SRR0,r5
        rfid
 
+fastsleep_workaround_at_exit:
+       li      r3,1
+       li      r4,0
+       li      r0,OPAL_CONFIG_CPU_IDLE_STATE
+       bl      opal_call_realmode
+       b       timebase_resync
+
 /*
  * R3 here contains the value that will be returned to the caller
  * of power7_nap.
index 8b2d2dc8ef106ef780c9a145335e9de17b3879a7..8ec017cb44461943c90ebdb6cdf6e007936efb39 100644 (file)
@@ -700,7 +700,6 @@ void start_secondary(void *unused)
        smp_store_cpu_info(cpu);
        set_dec(tb_ticks_per_jiffy);
        preempt_disable();
-       cpu_callin_map[cpu] = 1;
 
        if (smp_ops->setup_cpu)
                smp_ops->setup_cpu(cpu);
@@ -739,6 +738,14 @@ void start_secondary(void *unused)
        notify_cpu_starting(cpu);
        set_cpu_online(cpu, true);
 
+       /*
+        * CPU must be marked active and online before we signal back to the
+        * master, because the scheduler needs to see the cpu_online and
+        * cpu_active bits set.
+        */
+       smp_wmb();
+       cpu_callin_map[cpu] = 1;
+
        local_irq_enable();
 
        cpu_startup_entry(CPUHP_ONLINE);
index dba34088da2896187a390ee720af2a4a65bc3216..f162d0b8eea380c6f00e026355309426fba4a229 100644 (file)
@@ -177,7 +177,7 @@ static ssize_t _name##_show(struct device *dev,                     \
        }                                                       \
        ret = sprintf(buf, _fmt, _expr);                        \
 e_free:                                                                \
-       kfree(page);                                            \
+       kmem_cache_free(hv_page_cache, page);                   \
        return ret;                                             \
 }                                                              \
 static DEVICE_ATTR_RO(_name)
@@ -217,11 +217,14 @@ static bool is_physical_domain(int domain)
                domain == HV_24X7_PERF_DOMAIN_PHYSICAL_CORE;
 }
 
+DEFINE_PER_CPU(char, hv_24x7_reqb[4096]) __aligned(4096);
+DEFINE_PER_CPU(char, hv_24x7_resb[4096]) __aligned(4096);
+
 static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
                                         u16 lpar, u64 *res,
                                         bool success_expected)
 {
-       unsigned long ret = -ENOMEM;
+       unsigned long ret;
 
        /*
         * request_buffer and result_buffer are not required to be 4k aligned,
@@ -243,13 +246,11 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
        BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
        BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
 
-       request_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER);
-       if (!request_buffer)
-               goto out;
+       request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
+       result_buffer = (void *)get_cpu_var(hv_24x7_resb);
 
-       result_buffer = kmem_cache_zalloc(hv_page_cache, GFP_USER);
-       if (!result_buffer)
-               goto out_free_request_buffer;
+       memset(request_buffer, 0, 4096);
+       memset(result_buffer, 0, 4096);
 
        *request_buffer = (struct reqb) {
                .buf = {
@@ -278,15 +279,11 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
                                domain, offset, ix, lpar, ret, ret,
                                result_buffer->buf.detailed_rc,
                                result_buffer->buf.failing_request_ix);
-               goto out_free_result_buffer;
+               goto out;
        }
 
        *res = be64_to_cpu(result_buffer->result);
 
-out_free_result_buffer:
-       kfree(result_buffer);
-out_free_request_buffer:
-       kfree(request_buffer);
 out:
        return ret;
 }
index 0a299be588af295bcd384bd40b2ecd332586f317..54eca8b3b288f8fcca45ef5f44c5211832b3416f 100644 (file)
@@ -158,6 +158,43 @@ opal_tracepoint_return:
        blr
 #endif
 
+/*
+ * Make opal call in realmode. This is a generic function to be called
+ * from realmode. It handles endianness.
+ *
+ * r13 - paca pointer
+ * r1  - stack pointer
+ * r0  - opal token
+ */
+_GLOBAL(opal_call_realmode)
+       mflr    r12
+       std     r12,PPC_LR_STKOFF(r1)
+       ld      r2,PACATOC(r13)
+       /* Set opal return address */
+       LOAD_REG_ADDR(r12,return_from_opal_call)
+       mtlr    r12
+
+       mfmsr   r12
+#ifdef __LITTLE_ENDIAN__
+       /* Handle endian-ness */
+       li      r11,MSR_LE
+       andc    r12,r12,r11
+#endif
+       mtspr   SPRN_HSRR1,r12
+       LOAD_REG_ADDR(r11,opal)
+       ld      r12,8(r11)
+       ld      r2,0(r11)
+       mtspr   SPRN_HSRR0,r12
+       hrfid
+
+return_from_opal_call:
+#ifdef __LITTLE_ENDIAN__
+       FIXUP_ENDIAN
+#endif
+       ld      r12,PPC_LR_STKOFF(r1)
+       mtlr    r12
+       blr
+
 OPAL_CALL(opal_invalid_call,                   OPAL_INVALID_CALL);
 OPAL_CALL(opal_console_write,                  OPAL_CONSOLE_WRITE);
 OPAL_CALL(opal_console_read,                   OPAL_CONSOLE_READ);
@@ -247,6 +284,7 @@ OPAL_CALL(opal_sensor_read,                 OPAL_SENSOR_READ);
 OPAL_CALL(opal_get_param,                      OPAL_GET_PARAM);
 OPAL_CALL(opal_set_param,                      OPAL_SET_PARAM);
 OPAL_CALL(opal_handle_hmi,                     OPAL_HANDLE_HMI);
+OPAL_CALL(opal_slw_set_reg,                    OPAL_SLW_SET_REG);
 OPAL_CALL(opal_register_dump_region,           OPAL_REGISTER_DUMP_REGION);
 OPAL_CALL(opal_unregister_dump_region,         OPAL_UNREGISTER_DUMP_REGION);
 OPAL_CALL(opal_pci_set_phb_cxl_mode,           OPAL_PCI_SET_PHB_CXL_MODE);
@@ -254,3 +292,4 @@ OPAL_CALL(opal_tpo_write,                   OPAL_WRITE_TPO);
 OPAL_CALL(opal_tpo_read,                       OPAL_READ_TPO);
 OPAL_CALL(opal_ipmi_send,                      OPAL_IPMI_SEND);
 OPAL_CALL(opal_ipmi_recv,                      OPAL_IPMI_RECV);
+OPAL_CALL(opal_i2c_request,                    OPAL_I2C_REQUEST);
index cb0b6de79cd4f12ff50cc40508f9db3352f67429..f10b9ec8c1f5c16fe9057014f5582791ee75a511 100644 (file)
@@ -9,8 +9,9 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#undef DEBUG
+#define pr_fmt(fmt)    "opal: " fmt
 
+#include <linux/printk.h>
 #include <linux/types.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
@@ -625,6 +626,39 @@ static int opal_sysfs_init(void)
        return 0;
 }
 
+static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
+                              struct bin_attribute *bin_attr,
+                              char *buf, loff_t off, size_t count)
+{
+       return memory_read_from_buffer(buf, count, &off, bin_attr->private,
+                                      bin_attr->size);
+}
+
+static BIN_ATTR_RO(symbol_map, 0);
+
+static void opal_export_symmap(void)
+{
+       const __be64 *syms;
+       unsigned int size;
+       struct device_node *fw;
+       int rc;
+
+       fw = of_find_node_by_path("/ibm,opal/firmware");
+       if (!fw)
+               return;
+       syms = of_get_property(fw, "symbol-map", &size);
+       if (!syms || size != 2 * sizeof(__be64))
+               return;
+
+       /* Setup attributes */
+       bin_attr_symbol_map.private = __va(be64_to_cpu(syms[0]));
+       bin_attr_symbol_map.size = be64_to_cpu(syms[1]);
+
+       rc = sysfs_create_bin_file(opal_kobj, &bin_attr_symbol_map);
+       if (rc)
+               pr_warn("Error %d creating OPAL symbols file\n", rc);
+}
+
 static void __init opal_dump_region_init(void)
 {
        void *addr;
@@ -653,6 +687,14 @@ static void opal_ipmi_init(struct device_node *opal_node)
                        of_platform_device_create(np, NULL, NULL);
 }
 
+static void opal_i2c_create_devs(void)
+{
+       struct device_node *np;
+
+       for_each_compatible_node(np, NULL, "ibm,opal-i2c")
+               of_platform_device_create(np, NULL, NULL);
+}
+
 static int __init opal_init(void)
 {
        struct device_node *np, *consoles;
@@ -679,6 +721,9 @@ static int __init opal_init(void)
                of_node_put(consoles);
        }
 
+       /* Create i2c platform devices */
+       opal_i2c_create_devs();
+
        /* Find all OPAL interrupts and request them */
        irqs = of_get_property(opal_node, "opal-interrupts", &irqlen);
        pr_debug("opal: Found %d interrupts reserved for OPAL\n",
@@ -702,6 +747,8 @@ static int __init opal_init(void)
        /* Create "opal" kobject under /sys/firmware */
        rc = opal_sysfs_init();
        if (rc == 0) {
+               /* Export symbol map to userspace */
+               opal_export_symmap();
                /* Setup dump region interface */
                opal_dump_region_init();
                /* Setup error log interface */
@@ -824,3 +871,4 @@ EXPORT_SYMBOL_GPL(opal_rtc_read);
 EXPORT_SYMBOL_GPL(opal_rtc_write);
 EXPORT_SYMBOL_GPL(opal_tpo_read);
 EXPORT_SYMBOL_GPL(opal_tpo_write);
+EXPORT_SYMBOL_GPL(opal_i2c_request);
index 6c8e2d188cd096330b7b1926da13c50de1e73f2e..604c48e7879a081cfea23a57f863328dc04d7688 100644 (file)
@@ -29,6 +29,8 @@ static inline u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
 }
 #endif
 
+extern u32 pnv_get_supported_cpuidle_states(void);
+
 extern void pnv_lpc_init(void);
 
 bool cpu_core_split_required(void);
index 30b1c3e298a64aae1fc0b8fc609ea449b2955027..b700a329c31d448444d0f48e0fdd0b9176f1a825 100644 (file)
 #include <asm/opal.h>
 #include <asm/kexec.h>
 #include <asm/smp.h>
+#include <asm/cputhreads.h>
+#include <asm/cpuidle.h>
+#include <asm/code-patching.h>
 
 #include "powernv.h"
+#include "subcore.h"
 
 static void __init pnv_setup_arch(void)
 {
@@ -288,6 +292,168 @@ static void __init pnv_setup_machdep_rtas(void)
 }
 #endif /* CONFIG_PPC_POWERNV_RTAS */
 
+static u32 supported_cpuidle_states;
+
+int pnv_save_sprs_for_winkle(void)
+{
+       int cpu;
+       int rc;
+
+       /*
+        * hid0, hid1, hid4, hid5, hmeer and lpcr values are symmetric across
+        * all cpus at boot. Get these reg values of current cpu and use the
+        * same across all cpus.
+        */
+       uint64_t lpcr_val = mfspr(SPRN_LPCR);
+       uint64_t hid0_val = mfspr(SPRN_HID0);
+       uint64_t hid1_val = mfspr(SPRN_HID1);
+       uint64_t hid4_val = mfspr(SPRN_HID4);
+       uint64_t hid5_val = mfspr(SPRN_HID5);
+       uint64_t hmeer_val = mfspr(SPRN_HMEER);
+
+       for_each_possible_cpu(cpu) {
+               uint64_t pir = get_hard_smp_processor_id(cpu);
+               uint64_t hsprg0_val = (uint64_t)&paca[cpu];
+
+               /*
+                * HSPRG0 is used to store the cpu's pointer to paca. Hence last
+                * 3 bits are guaranteed to be 0. Program slw to restore HSPRG0
+                * with 63rd bit set, so that when a thread wakes up at 0x100 we
+                * can use this bit to distinguish between fastsleep and
+                * deep winkle.
+                */
+               hsprg0_val |= 1;
+
+               rc = opal_slw_set_reg(pir, SPRN_HSPRG0, hsprg0_val);
+               if (rc != 0)
+                       return rc;
+
+               rc = opal_slw_set_reg(pir, SPRN_LPCR, lpcr_val);
+               if (rc != 0)
+                       return rc;
+
+               /* HIDs are per core registers */
+               if (cpu_thread_in_core(cpu) == 0) {
+
+                       rc = opal_slw_set_reg(pir, SPRN_HMEER, hmeer_val);
+                       if (rc != 0)
+                               return rc;
+
+                       rc = opal_slw_set_reg(pir, SPRN_HID0, hid0_val);
+                       if (rc != 0)
+                               return rc;
+
+                       rc = opal_slw_set_reg(pir, SPRN_HID1, hid1_val);
+                       if (rc != 0)
+                               return rc;
+
+                       rc = opal_slw_set_reg(pir, SPRN_HID4, hid4_val);
+                       if (rc != 0)
+                               return rc;
+
+                       rc = opal_slw_set_reg(pir, SPRN_HID5, hid5_val);
+                       if (rc != 0)
+                               return rc;
+               }
+       }
+
+       return 0;
+}
+
+static void pnv_alloc_idle_core_states(void)
+{
+       int i, j;
+       int nr_cores = cpu_nr_cores();
+       u32 *core_idle_state;
+
+       /*
+        * core_idle_state - First 8 bits track the idle state of each thread
+        * of the core. The 8th bit is the lock bit. Initially all thread bits
+        * are set. They are cleared when the thread enters deep idle state
+        * like sleep and winkle. Initially the lock bit is cleared.
+        * The lock bit has 2 purposes
+        * a. While the first thread is restoring core state, it prevents
+        * other threads in the core from switching to process context.
+        * b. While the last thread in the core is saving the core state, it
+        * prevents a different thread from waking up.
+        */
+       for (i = 0; i < nr_cores; i++) {
+               int first_cpu = i * threads_per_core;
+               int node = cpu_to_node(first_cpu);
+
+               core_idle_state = kmalloc_node(sizeof(u32), GFP_KERNEL, node);
+               *core_idle_state = PNV_CORE_IDLE_THREAD_BITS;
+
+               for (j = 0; j < threads_per_core; j++) {
+                       int cpu = first_cpu + j;
+
+                       paca[cpu].core_idle_state_ptr = core_idle_state;
+                       paca[cpu].thread_idle_state = PNV_THREAD_RUNNING;
+                       paca[cpu].thread_mask = 1 << j;
+               }
+       }
+
+       update_subcore_sibling_mask();
+
+       if (supported_cpuidle_states & OPAL_PM_WINKLE_ENABLED)
+               pnv_save_sprs_for_winkle();
+}
+
+u32 pnv_get_supported_cpuidle_states(void)
+{
+       return supported_cpuidle_states;
+}
+EXPORT_SYMBOL_GPL(pnv_get_supported_cpuidle_states);
+
+static int __init pnv_init_idle_states(void)
+{
+       struct device_node *power_mgt;
+       int dt_idle_states;
+       const __be32 *idle_state_flags;
+       u32 len_flags, flags;
+       int i;
+
+       supported_cpuidle_states = 0;
+
+       if (cpuidle_disable != IDLE_NO_OVERRIDE)
+               return 0;
+
+       if (!firmware_has_feature(FW_FEATURE_OPALv3))
+               return 0;
+
+       power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
+       if (!power_mgt) {
+               pr_warn("opal: PowerMgmt Node not found\n");
+               return 0;
+       }
+
+       idle_state_flags = of_get_property(power_mgt,
+                       "ibm,cpu-idle-state-flags", &len_flags);
+       if (!idle_state_flags) {
+               pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
+               return 0;
+       }
+
+       dt_idle_states = len_flags / sizeof(u32);
+
+       for (i = 0; i < dt_idle_states; i++) {
+               flags = be32_to_cpu(idle_state_flags[i]);
+               supported_cpuidle_states |= flags;
+       }
+       if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
+               patch_instruction(
+                       (unsigned int *)pnv_fastsleep_workaround_at_entry,
+                       PPC_INST_NOP);
+               patch_instruction(
+                       (unsigned int *)pnv_fastsleep_workaround_at_exit,
+                       PPC_INST_NOP);
+       }
+       pnv_alloc_idle_core_states();
+       return 0;
+}
+
+subsys_initcall(pnv_init_idle_states);
+
 static int __init pnv_probe(void)
 {
        unsigned long root = of_get_flat_dt_root();
index b716f666e48ae96c974dd01d0bb5eb40c0197b7b..fc34025ef82270b00c7c32811dca20552a61d2fe 100644 (file)
@@ -150,6 +150,7 @@ static void pnv_smp_cpu_kill_self(void)
 {
        unsigned int cpu;
        unsigned long srr1;
+       u32 idle_states;
 
        /* Standard hot unplug procedure */
        local_irq_disable();
@@ -160,13 +161,23 @@ static void pnv_smp_cpu_kill_self(void)
        generic_set_cpu_dead(cpu);
        smp_wmb();
 
+       idle_states = pnv_get_supported_cpuidle_states();
        /* We don't want to take decrementer interrupts while we are offline,
         * so clear LPCR:PECE1. We keep PECE2 enabled.
         */
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
        while (!generic_check_cpu_restart(cpu)) {
+
                ppc64_runlatch_off();
-               srr1 = power7_nap(1);
+
+               if (idle_states & OPAL_PM_WINKLE_ENABLED)
+                       srr1 = power7_winkle();
+               else if ((idle_states & OPAL_PM_SLEEP_ENABLED) ||
+                               (idle_states & OPAL_PM_SLEEP_ENABLED_ER1))
+                       srr1 = power7_sleep();
+               else
+                       srr1 = power7_nap(1);
+
                ppc64_runlatch_on();
 
                /*
@@ -198,13 +209,27 @@ static void pnv_smp_cpu_kill_self(void)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
+static int pnv_cpu_bootable(unsigned int nr)
+{
+       /*
+        * Starting with POWER8, the subcore logic relies on all threads of a
+        * core being booted so that they can participate in split mode
+        * switches. So on those machines we ignore the smt_enabled_at_boot
+        * setting (smt-enabled on the kernel command line).
+        */
+       if (cpu_has_feature(CPU_FTR_ARCH_207S))
+               return 1;
+
+       return smp_generic_cpu_bootable(nr);
+}
+
 static struct smp_ops_t pnv_smp_ops = {
        .message_pass   = smp_muxed_ipi_message_pass,
        .cause_ipi      = NULL, /* Filled at runtime by xics_smp_probe() */
        .probe          = xics_smp_probe,
        .kick_cpu       = pnv_smp_kick_cpu,
        .setup_cpu      = pnv_smp_setup_cpu,
-       .cpu_bootable   = smp_generic_cpu_bootable,
+       .cpu_bootable   = pnv_cpu_bootable,
 #ifdef CONFIG_HOTPLUG_CPU
        .cpu_disable    = pnv_smp_cpu_disable,
        .cpu_die        = generic_cpu_die,
index c87f96b79d1a02ed5f5371a2d02fc509deb3f3a1..f60f80ada9039451466a6b90f77bf091ade405e0 100644 (file)
@@ -160,6 +160,18 @@ static void wait_for_sync_step(int step)
        mb();
 }
 
+static void update_hid_in_slw(u64 hid0)
+{
+       u64 idle_states = pnv_get_supported_cpuidle_states();
+
+       if (idle_states & OPAL_PM_WINKLE_ENABLED) {
+               /* OPAL call to patch slw with the new HID0 value */
+               u64 cpu_pir = hard_smp_processor_id();
+
+               opal_slw_set_reg(cpu_pir, SPRN_HID0, hid0);
+       }
+}
+
 static void unsplit_core(void)
 {
        u64 hid0, mask;
@@ -179,6 +191,7 @@ static void unsplit_core(void)
        hid0 = mfspr(SPRN_HID0);
        hid0 &= ~HID0_POWER8_DYNLPARDIS;
        mtspr(SPRN_HID0, hid0);
+       update_hid_in_slw(hid0);
 
        while (mfspr(SPRN_HID0) & mask)
                cpu_relax();
@@ -215,6 +228,7 @@ static void split_core(int new_mode)
        hid0  = mfspr(SPRN_HID0);
        hid0 |= HID0_POWER8_DYNLPARDIS | split_parms[i].value;
        mtspr(SPRN_HID0, hid0);
+       update_hid_in_slw(hid0);
 
        /* Wait for it to happen */
        while (!(mfspr(SPRN_HID0) & split_parms[i].mask))
@@ -251,6 +265,25 @@ bool cpu_core_split_required(void)
        return true;
 }
 
+void update_subcore_sibling_mask(void)
+{
+       int cpu;
+       /*
+        * sibling mask for the first cpu. Left shift this by required bits
+        * to get sibling mask for the rest of the cpus.
+        */
+       int sibling_mask_first_cpu =  (1 << threads_per_subcore) - 1;
+
+       for_each_possible_cpu(cpu) {
+               int tid = cpu_thread_in_core(cpu);
+               int offset = (tid / threads_per_subcore) * threads_per_subcore;
+               int mask = sibling_mask_first_cpu << offset;
+
+               paca[cpu].subcore_sibling_mask = mask;
+
+       }
+}
+
 static int cpu_update_split_mode(void *data)
 {
        int cpu, new_mode = *(int *)data;
@@ -284,6 +317,7 @@ static int cpu_update_split_mode(void *data)
                /* Make the new mode public */
                subcores_per_core = new_mode;
                threads_per_subcore = threads_per_core / subcores_per_core;
+               update_subcore_sibling_mask();
 
                /* Make sure the new mode is written before we exit */
                mb();
index 148abc91debfd84735ac6c8967315dded6f8e8a9..84e02ae52895078036ea656f608b2c719f7ad994 100644 (file)
 #define SYNC_STEP_FINISHED     3       /* Set by secondary when split/unsplit is done */
 
 #ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
 void split_core_secondary_loop(u8 *state);
-#endif
+extern void update_subcore_sibling_mask(void);
+#else
+static inline void update_subcore_sibling_mask(void) { };
+#endif /* CONFIG_SMP */
+
+#endif /* __ASSEMBLY__ */
index 8b9ccf02a2c5d9b132d06a2c9c7a1916f64084bb..8a1be901773055c407d2ca7327707fdc6a70ddc9 100644 (file)
@@ -227,12 +227,10 @@ static void ipte_lock_simple(struct kvm_vcpu *vcpu)
                goto out;
        ic = &vcpu->kvm->arch.sca->ipte_control;
        do {
-               old = *ic;
-               barrier();
+               old = READ_ONCE(*ic);
                while (old.k) {
                        cond_resched();
-                       old = *ic;
-                       barrier();
+                       old = READ_ONCE(*ic);
                }
                new = old;
                new.k = 1;
@@ -251,8 +249,7 @@ static void ipte_unlock_simple(struct kvm_vcpu *vcpu)
                goto out;
        ic = &vcpu->kvm->arch.sca->ipte_control;
        do {
-               old = *ic;
-               barrier();
+               old = READ_ONCE(*ic);
                new = old;
                new.k = 0;
        } while (cmpxchg(&ic->val, old.val, new.val) != old.val);
@@ -267,12 +264,10 @@ static void ipte_lock_siif(struct kvm_vcpu *vcpu)
 
        ic = &vcpu->kvm->arch.sca->ipte_control;
        do {
-               old = *ic;
-               barrier();
+               old = READ_ONCE(*ic);
                while (old.kg) {
                        cond_resched();
-                       old = *ic;
-                       barrier();
+                       old = READ_ONCE(*ic);
                }
                new = old;
                new.k = 1;
@@ -286,8 +281,7 @@ static void ipte_unlock_siif(struct kvm_vcpu *vcpu)
 
        ic = &vcpu->kvm->arch.sca->ipte_control;
        do {
-               old = *ic;
-               barrier();
+               old = READ_ONCE(*ic);
                new = old;
                new.kh--;
                if (!new.kh)
index c6b6ee5f38b2dd43f612999ea2516254bf049868..0f09f5285d5e729ee6d97a7ca2a7e8c4e20d668a 100644 (file)
@@ -223,7 +223,7 @@ config CPU_SHX3
 config ARCH_SHMOBILE
        bool
        select ARCH_SUSPEND_POSSIBLE
-       select PM_RUNTIME
+       select PM
 
 config CPU_HAS_PMU
        depends on CPU_SH4 || CPU_SH4A
index ec70475da8906ae301540dd5c21dff349ab8d13f..a8d975793b6dcd2afdfafe2f475e6491ed99e8e4 100644 (file)
@@ -47,7 +47,7 @@ CONFIG_PREEMPT=y
 CONFIG_BINFMT_MISC=y
 CONFIG_PM=y
 CONFIG_PM_DEBUG=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_CPU_IDLE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
index 76a76a295d7471668dcc9b6b880533839b8e434d..e7e56a4131b47dea3515c74be228ffba2d62e651 100644 (file)
@@ -82,7 +82,7 @@ CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y
 CONFIG_BINFMT_MISC=y
 CONFIG_PM=y
 CONFIG_PM_DEBUG=y
-CONFIG_PM_RUNTIME=y
+CONFIG_PM=y
 CONFIG_CPU_IDLE=y
 CONFIG_NET=y
 CONFIG_PACKET=y
index d69f1cd87fd996bcf7052d7abdf2ad43325402b0..ba397bde79482043d46e0e90d0bd6d7e71daa110 100644 (file)
@@ -249,10 +249,6 @@ config HAVE_INTEL_TXT
        def_bool y
        depends on INTEL_IOMMU && ACPI
 
-config X86_INTEL_MPX
-       def_bool y
-       depends on CPU_SUP_INTEL
-
 config X86_32_SMP
        def_bool y
        depends on X86_32 && SMP
@@ -887,11 +883,11 @@ config X86_UP_IOAPIC
 config X86_LOCAL_APIC
        def_bool y
        depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
+       select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
 
 config X86_IO_APIC
-       def_bool y
-       depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
-       select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
+       def_bool X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC
+       depends on X86_LOCAL_APIC
        select IRQ_DOMAIN
 
 config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
@@ -1594,6 +1590,32 @@ config X86_SMAP
 
          If unsure, say Y.
 
+config X86_INTEL_MPX
+       prompt "Intel MPX (Memory Protection Extensions)"
+       def_bool n
+       depends on CPU_SUP_INTEL
+       ---help---
+         MPX provides hardware features that can be used in
+         conjunction with compiler-instrumented code to check
+         memory references.  It is designed to detect buffer
+         overflow or underflow bugs.
+
+         This option enables running applications which are
+         instrumented or otherwise use MPX.  It does not use MPX
+         itself inside the kernel or to protect the kernel
+         against bad memory references.
+
+         Enabling this option will make the kernel larger:
+         ~8k of kernel text and 36 bytes of data on a 64-bit
+         defconfig.  It adds a long to the 'mm_struct' which
+         will increase the kernel memory overhead of each
+         process and adds some branches to paths used during
+         exec() and munmap().
+
+         For details, see Documentation/x86/intel_mpx.txt
+
+         If unsure, say N.
+
 config EFI
        bool "EFI runtime service support"
        depends on ACPI
index 4615906d83df5b9b606f2f83f9d2f6a90590a297..9662290e0b2075ab42608af776abbe4a4219b6fd 100644 (file)
@@ -94,30 +94,7 @@ extern void trace_call_function_single_interrupt(void);
 #define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi
 #endif /* CONFIG_TRACING */
 
-/* IOAPIC */
-#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1<<(x)) & io_apic_irqs))
-extern unsigned long io_apic_irqs;
-
-extern void setup_IO_APIC(void);
-extern void disable_IO_APIC(void);
-
-struct io_apic_irq_attr {
-       int ioapic;
-       int ioapic_pin;
-       int trigger;
-       int polarity;
-};
-
-static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
-                                       int ioapic, int ioapic_pin,
-                                       int trigger, int polarity)
-{
-       irq_attr->ioapic        = ioapic;
-       irq_attr->ioapic_pin    = ioapic_pin;
-       irq_attr->trigger       = trigger;
-       irq_attr->polarity      = polarity;
-}
-
+#ifdef CONFIG_IRQ_REMAP
 /* Intel specific interrupt remapping information */
 struct irq_2_iommu {
        struct intel_iommu *iommu;
@@ -131,14 +108,12 @@ struct irq_2_irte {
        u16 devid; /* Device ID for IRTE table */
        u16 index; /* Index into IRTE table*/
 };
+#endif /* CONFIG_IRQ_REMAP */
+
+#ifdef CONFIG_X86_LOCAL_APIC
+struct irq_data;
 
-/*
- * This is performance-critical, we want to do it O(1)
- *
- * Most irqs are mapped 1:1 with pins.
- */
 struct irq_cfg {
-       struct irq_pin_list     *irq_2_pin;
        cpumask_var_t           domain;
        cpumask_var_t           old_domain;
        u8                      vector;
@@ -150,18 +125,39 @@ struct irq_cfg {
                struct irq_2_irte  irq_2_irte;
        };
 #endif
+       union {
+#ifdef CONFIG_X86_IO_APIC
+               struct {
+                       struct list_head        irq_2_pin;
+               };
+#endif
+       };
 };
 
+extern struct irq_cfg *irq_cfg(unsigned int irq);
+extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
+extern struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node);
+extern void lock_vector_lock(void);
+extern void unlock_vector_lock(void);
 extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
+extern void clear_irq_vector(int irq, struct irq_cfg *cfg);
+extern void setup_vector_irq(int cpu);
+#ifdef CONFIG_SMP
 extern void send_cleanup_vector(struct irq_cfg *);
+extern void irq_complete_move(struct irq_cfg *cfg);
+#else
+static inline void send_cleanup_vector(struct irq_cfg *c) { }
+static inline void irq_complete_move(struct irq_cfg *c) { }
+#endif
 
-struct irq_data;
-int __ioapic_set_affinity(struct irq_data *, const struct cpumask *,
-                         unsigned int *dest_id);
-extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin, struct io_apic_irq_attr *irq_attr);
-extern void setup_ioapic_dest(void);
-
-extern void enable_IO_APIC(void);
+extern int apic_retrigger_irq(struct irq_data *data);
+extern void apic_ack_edge(struct irq_data *data);
+extern int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+                            unsigned int *dest_id);
+#else  /*  CONFIG_X86_LOCAL_APIC */
+static inline void lock_vector_lock(void) {}
+static inline void unlock_vector_lock(void) {}
+#endif /* CONFIG_X86_LOCAL_APIC */
 
 /* Statistics */
 extern atomic_t irq_err_count;
@@ -185,7 +181,8 @@ extern __visible void smp_call_function_single_interrupt(struct pt_regs *);
 extern __visible void smp_invalidate_interrupt(struct pt_regs *);
 #endif
 
-extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
+extern void (*__initconst interrupt[FIRST_SYSTEM_VECTOR
+                                   - FIRST_EXTERNAL_VECTOR])(void);
 #ifdef CONFIG_TRACING
 #define trace_interrupt interrupt
 #endif
@@ -195,17 +192,6 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
 
 typedef int vector_irq_t[NR_VECTORS];
 DECLARE_PER_CPU(vector_irq_t, vector_irq);
-extern void setup_vector_irq(int cpu);
-
-#ifdef CONFIG_X86_IO_APIC
-extern void lock_vector_lock(void);
-extern void unlock_vector_lock(void);
-extern void __setup_vector_irq(int cpu);
-#else
-static inline void lock_vector_lock(void) {}
-static inline void unlock_vector_lock(void) {}
-static inline void __setup_vector_irq(int cpu) {}
-#endif
 
 #endif /* !ASSEMBLY_ */
 
index 1733ab49ac5e3f878eb7c3ad69bf23df068ea258..bf006cce94181ce72346b51c6de22dc24ebb908f 100644 (file)
@@ -132,6 +132,10 @@ extern int noioapicquirk;
 /* -1 if "noapic" boot option passed */
 extern int noioapicreroute;
 
+extern unsigned long io_apic_irqs;
+
+#define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1 << (x)) & io_apic_irqs))
+
 /*
  * If we use the IO-APIC for IRQ routing, disable automatic
  * assignment of PCI IRQ's.
@@ -139,18 +143,15 @@ extern int noioapicreroute;
 #define io_apic_assign_pci_irqs \
        (mp_irq_entries && !skip_ioapic_setup && io_apic_irqs)
 
-struct io_apic_irq_attr;
 struct irq_cfg;
 extern void ioapic_insert_resources(void);
+extern int arch_early_ioapic_init(void);
 
 extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *,
                                     unsigned int, int,
                                     struct io_apic_irq_attr *);
 extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg);
 
-extern void native_compose_msi_msg(struct pci_dev *pdev,
-                                  unsigned int irq, unsigned int dest,
-                                  struct msi_msg *msg, u8 hpet_id);
 extern void native_eoi_ioapic_pin(int apic, int pin, int vector);
 
 extern int save_ioapic_entries(void);
@@ -160,6 +161,13 @@ extern int restore_ioapic_entries(void);
 extern void setup_ioapic_ids_from_mpc(void);
 extern void setup_ioapic_ids_from_mpc_nocheck(void);
 
+struct io_apic_irq_attr {
+       int ioapic;
+       int ioapic_pin;
+       int trigger;
+       int polarity;
+};
+
 enum ioapic_domain_type {
        IOAPIC_DOMAIN_INVALID,
        IOAPIC_DOMAIN_LEGACY,
@@ -188,8 +196,10 @@ extern int mp_find_ioapic_pin(int ioapic, u32 gsi);
 extern u32 mp_pin_to_gsi(int ioapic, int pin);
 extern int mp_map_gsi_to_irq(u32 gsi, unsigned int flags);
 extern void mp_unmap_irq(int irq);
-extern void __init mp_register_ioapic(int id, u32 address, u32 gsi_base,
-                                     struct ioapic_domain_cfg *cfg);
+extern int mp_register_ioapic(int id, u32 address, u32 gsi_base,
+                             struct ioapic_domain_cfg *cfg);
+extern int mp_unregister_ioapic(u32 gsi_base);
+extern int mp_ioapic_registered(u32 gsi_base);
 extern int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq,
                            irq_hw_number_t hwirq);
 extern void mp_irqdomain_unmap(struct irq_domain *domain, unsigned int virq);
@@ -227,19 +237,25 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned
 
 extern void io_apic_eoi(unsigned int apic, unsigned int vector);
 
-extern bool mp_should_keep_irq(struct device *dev);
-
+extern void setup_IO_APIC(void);
+extern void enable_IO_APIC(void);
+extern void disable_IO_APIC(void);
+extern void setup_ioapic_dest(void);
+extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin);
+extern void print_IO_APICs(void);
 #else  /* !CONFIG_X86_IO_APIC */
 
+#define IO_APIC_IRQ(x)         0
 #define io_apic_assign_pci_irqs 0
 #define setup_ioapic_ids_from_mpc x86_init_noop
 static inline void ioapic_insert_resources(void) { }
+static inline int arch_early_ioapic_init(void) { return 0; }
+static inline void print_IO_APICs(void) {}
 #define gsi_top (NR_IRQS_LEGACY)
 static inline int mp_find_ioapic(u32 gsi) { return 0; }
 static inline u32 mp_pin_to_gsi(int ioapic, int pin) { return UINT_MAX; }
 static inline int mp_map_gsi_to_irq(u32 gsi, unsigned int flags) { return gsi; }
 static inline void mp_unmap_irq(int irq) { }
-static inline bool mp_should_keep_irq(struct device *dev) { return 1; }
 
 static inline int save_ioapic_entries(void)
 {
@@ -262,7 +278,6 @@ static inline void disable_ioapic_support(void) { }
 #define native_io_apic_print_entries   NULL
 #define native_ioapic_set_affinity     NULL
 #define native_setup_ioapic_entry      NULL
-#define native_compose_msi_msg         NULL
 #define native_eoi_ioapic_pin          NULL
 #endif
 
index 5702d7e3111db94facc17423b882d46617bd64fe..666c89ec4bd7298c1114e7e220a3fee3ebe77bc8 100644 (file)
 
 #define NR_VECTORS                      256
 
+#ifdef CONFIG_X86_LOCAL_APIC
+#define FIRST_SYSTEM_VECTOR            LOCAL_TIMER_VECTOR
+#else
+#define FIRST_SYSTEM_VECTOR            NR_VECTORS
+#endif
+
 #define FPU_IRQ                                  13
 
 #define        FIRST_VM86_IRQ                     3
index 0892ea0e683f01dbe6e79b649922a221db00b35c..4e370a5d81170e4fb4c6fa5d1abaf451c87cf502 100644 (file)
@@ -96,12 +96,15 @@ extern void pci_iommu_alloc(void);
 #ifdef CONFIG_PCI_MSI
 /* implemented in arch/x86/kernel/apic/io_apic. */
 struct msi_desc;
+void native_compose_msi_msg(struct pci_dev *pdev, unsigned int irq,
+                           unsigned int dest, struct msi_msg *msg, u8 hpet_id);
 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
 void native_teardown_msi_irq(unsigned int irq);
 void native_restore_msi_irqs(struct pci_dev *dev);
 int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
                  unsigned int irq_base, unsigned int irq_offset);
 #else
+#define native_compose_msi_msg         NULL
 #define native_setup_msi_irqs          NULL
 #define native_teardown_msi_irq                NULL
 #endif
index fa1195dae42541aaa1d836782a3a65aa25640e74..164e3f8d3c3dbb6eb4fc0ea60e01cae15fe5116e 100644 (file)
@@ -93,6 +93,8 @@ extern raw_spinlock_t pci_config_lock;
 extern int (*pcibios_enable_irq)(struct pci_dev *dev);
 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
 
+extern bool mp_should_keep_irq(struct device *dev);
+
 struct pci_raw_ops {
        int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
                                                int reg, int len, u32 *val);
index a4efe477ceab0815244765a5d882f286aec0817c..625660f8a2fcf0cb4b4b1a9216908c98fffe00a1 100644 (file)
@@ -92,7 +92,7 @@ static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
                unsigned count = SPIN_THRESHOLD;
 
                do {
-                       if (ACCESS_ONCE(lock->tickets.head) == inc.tail)
+                       if (READ_ONCE(lock->tickets.head) == inc.tail)
                                goto out;
                        cpu_relax();
                } while (--count);
@@ -105,7 +105,7 @@ static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
        arch_spinlock_t old, new;
 
-       old.tickets = ACCESS_ONCE(lock->tickets);
+       old.tickets = READ_ONCE(lock->tickets);
        if (old.tickets.head != (old.tickets.tail & ~TICKET_SLOWPATH_FLAG))
                return 0;
 
@@ -162,14 +162,14 @@ static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-       struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+       struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
        return tmp.tail != tmp.head;
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
-       struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+       struct __raw_tickets tmp = READ_ONCE(lock->tickets);
 
        return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
 }
index 46727eb37bfe20915242badc965a2b4985bd28b4..6e1aaf73852ac956156df80f4dfcb931633f5269 100644 (file)
@@ -28,6 +28,13 @@ struct user_desc {
        unsigned int  seg_not_present:1;
        unsigned int  useable:1;
 #ifdef __x86_64__
+       /*
+        * Because this bit is not present in 32-bit user code, user
+        * programs can pass uninitialized values here.  Therefore, in
+        * any context in which a user_desc comes from a 32-bit program,
+        * the kernel must act as though lm == 0, regardless of the
+        * actual value.
+        */
        unsigned int  lm:1;
 #endif
 };
index a142e77693e179334987d3502e33c5444fde885c..4433a4be8171b095ff56bb6699f71d67857cc6fb 100644 (file)
@@ -76,6 +76,19 @@ int acpi_fix_pin2_polarity __initdata;
 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
 #endif
 
+/*
+ * Locks related to IOAPIC hotplug
+ * Hotplug side:
+ *     ->device_hotplug_lock
+ *             ->acpi_ioapic_lock
+ *                     ->ioapic_lock
+ * Interrupt mapping side:
+ *     ->acpi_ioapic_lock
+ *             ->ioapic_mutex
+ *                     ->ioapic_lock
+ */
+static DEFINE_MUTEX(acpi_ioapic_lock);
+
 /* --------------------------------------------------------------------------
                               Boot-time Configuration
    -------------------------------------------------------------------------- */
@@ -395,10 +408,6 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger,
        if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
                return gsi;
 
-       /* Don't set up the ACPI SCI because it's already set up */
-       if (acpi_gbl_FADT.sci_interrupt == gsi)
-               return mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC);
-
        trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1;
        polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1;
        node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
@@ -411,7 +420,8 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger,
        if (irq < 0)
                return irq;
 
-       if (enable_update_mptable)
+       /* Don't set up the ACPI SCI because it's already set up */
+       if (enable_update_mptable && acpi_gbl_FADT.sci_interrupt != gsi)
                mp_config_acpi_gsi(dev, gsi, trigger, polarity);
 
        return irq;
@@ -424,9 +434,6 @@ static void mp_unregister_gsi(u32 gsi)
        if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
                return;
 
-       if (acpi_gbl_FADT.sci_interrupt == gsi)
-               return;
-
        irq = mp_map_gsi_to_irq(gsi, 0);
        if (irq > 0)
                mp_unmap_irq(irq);
@@ -609,8 +616,10 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
        if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
                *irqp = gsi;
        } else {
+               mutex_lock(&acpi_ioapic_lock);
                irq = mp_map_gsi_to_irq(gsi,
                                        IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
+               mutex_unlock(&acpi_ioapic_lock);
                if (irq < 0)
                        return -1;
                *irqp = irq;
@@ -650,7 +659,9 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
        int irq = gsi;
 
 #ifdef CONFIG_X86_IO_APIC
+       mutex_lock(&acpi_ioapic_lock);
        irq = mp_register_gsi(dev, gsi, trigger, polarity);
+       mutex_unlock(&acpi_ioapic_lock);
 #endif
 
        return irq;
@@ -659,7 +670,9 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
 static void acpi_unregister_gsi_ioapic(u32 gsi)
 {
 #ifdef CONFIG_X86_IO_APIC
+       mutex_lock(&acpi_ioapic_lock);
        mp_unregister_gsi(gsi);
+       mutex_unlock(&acpi_ioapic_lock);
 #endif
 }
 
@@ -690,6 +703,7 @@ void acpi_unregister_gsi(u32 gsi)
 }
 EXPORT_SYMBOL_GPL(acpi_unregister_gsi);
 
+#ifdef CONFIG_X86_LOCAL_APIC
 static void __init acpi_set_irq_model_ioapic(void)
 {
        acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
@@ -697,6 +711,7 @@ static void __init acpi_set_irq_model_ioapic(void)
        __acpi_unregister_gsi = acpi_unregister_gsi_ioapic;
        acpi_ioapic = 1;
 }
+#endif
 
 /*
  *  ACPI based hotplug support for CPU
@@ -759,20 +774,74 @@ EXPORT_SYMBOL(acpi_unmap_lsapic);
 
 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
 {
-       /* TBD */
-       return -EINVAL;
-}
+       int ret = -ENOSYS;
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+       int ioapic_id;
+       u64 addr;
+       struct ioapic_domain_cfg cfg = {
+               .type = IOAPIC_DOMAIN_DYNAMIC,
+               .ops = &acpi_irqdomain_ops,
+       };
+
+       ioapic_id = acpi_get_ioapic_id(handle, gsi_base, &addr);
+       if (ioapic_id < 0) {
+               unsigned long long uid;
+               acpi_status status;
 
+               status = acpi_evaluate_integer(handle, METHOD_NAME__UID,
+                                              NULL, &uid);
+               if (ACPI_FAILURE(status)) {
+                       acpi_handle_warn(handle, "failed to get IOAPIC ID.\n");
+                       return -EINVAL;
+               }
+               ioapic_id = (int)uid;
+       }
+
+       mutex_lock(&acpi_ioapic_lock);
+       ret  = mp_register_ioapic(ioapic_id, phys_addr, gsi_base, &cfg);
+       mutex_unlock(&acpi_ioapic_lock);
+#endif
+
+       return ret;
+}
 EXPORT_SYMBOL(acpi_register_ioapic);
 
 int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
 {
-       /* TBD */
-       return -EINVAL;
-}
+       int ret = -ENOSYS;
 
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+       mutex_lock(&acpi_ioapic_lock);
+       ret  = mp_unregister_ioapic(gsi_base);
+       mutex_unlock(&acpi_ioapic_lock);
+#endif
+
+       return ret;
+}
 EXPORT_SYMBOL(acpi_unregister_ioapic);
 
+/**
+ * acpi_ioapic_registered - Check whether IOAPIC assoicatied with @gsi_base
+ *                         has been registered
+ * @handle:    ACPI handle of the IOAPIC deivce
+ * @gsi_base:  GSI base associated with the IOAPIC
+ *
+ * Assume caller holds some type of lock to serialize acpi_ioapic_registered()
+ * with acpi_register_ioapic()/acpi_unregister_ioapic().
+ */
+int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base)
+{
+       int ret = 0;
+
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+       mutex_lock(&acpi_ioapic_lock);
+       ret  = mp_ioapic_registered(gsi_base);
+       mutex_unlock(&acpi_ioapic_lock);
+#endif
+
+       return ret;
+}
+
 static int __init acpi_parse_sbf(struct acpi_table_header *table)
 {
        struct acpi_table_boot *sb;
@@ -1185,7 +1254,9 @@ static void __init acpi_process_madt(void)
                        /*
                         * Parse MADT IO-APIC entries
                         */
+                       mutex_lock(&acpi_ioapic_lock);
                        error = acpi_parse_madt_ioapic_entries();
+                       mutex_unlock(&acpi_ioapic_lock);
                        if (!error) {
                                acpi_set_irq_model_ioapic();
 
index dcb5b15401ce805f15bb84c111b842696606ea56..8bb12ddc5db8c8b9c7f2e0abdce44fef00f46453 100644 (file)
@@ -2,10 +2,12 @@
 # Makefile for local APIC drivers and for the IO-APIC code
 #
 
-obj-$(CONFIG_X86_LOCAL_APIC)   += apic.o apic_noop.o ipi.o
+obj-$(CONFIG_X86_LOCAL_APIC)   += apic.o apic_noop.o ipi.o vector.o
 obj-y                          += hw_nmi.o
 
 obj-$(CONFIG_X86_IO_APIC)      += io_apic.o
+obj-$(CONFIG_PCI_MSI)          += msi.o
+obj-$(CONFIG_HT_IRQ)           += htirq.o
 obj-$(CONFIG_SMP)              += ipi.o
 
 ifeq ($(CONFIG_X86_64),y)
index ba6cc041edb12e23a783fc0c5aaaed788af39dc0..29b5b18afa27dca80d384fade47906299a581e8f 100644 (file)
@@ -196,7 +196,7 @@ static int disable_apic_timer __initdata;
 int local_apic_timer_c2_ok;
 EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
 
-int first_system_vector = 0xfe;
+int first_system_vector = FIRST_SYSTEM_VECTOR;
 
 /*
  * Debug level, exported for io_apic.c
@@ -1930,7 +1930,7 @@ int __init APIC_init_uniprocessor(void)
 /*
  * This interrupt should _never_ happen with our APIC/SMP architecture
  */
-static inline void __smp_spurious_interrupt(void)
+static inline void __smp_spurious_interrupt(u8 vector)
 {
        u32 v;
 
@@ -1939,30 +1939,32 @@ static inline void __smp_spurious_interrupt(void)
         * if it is a vectored one.  Just in case...
         * Spurious interrupts should not be ACKed.
         */
-       v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
-       if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
+       v = apic_read(APIC_ISR + ((vector & ~0x1f) >> 1));
+       if (v & (1 << (vector & 0x1f)))
                ack_APIC_irq();
 
        inc_irq_stat(irq_spurious_count);
 
        /* see sw-dev-man vol 3, chapter 7.4.13.5 */
-       pr_info("spurious APIC interrupt on CPU#%d, "
-               "should never happen.\n", smp_processor_id());
+       pr_info("spurious APIC interrupt through vector %02x on CPU#%d, "
+               "should never happen.\n", vector, smp_processor_id());
 }
 
 __visible void smp_spurious_interrupt(struct pt_regs *regs)
 {
        entering_irq();
-       __smp_spurious_interrupt();
+       __smp_spurious_interrupt(~regs->orig_ax);
        exiting_irq();
 }
 
 __visible void smp_trace_spurious_interrupt(struct pt_regs *regs)
 {
+       u8 vector = ~regs->orig_ax;
+
        entering_irq();
-       trace_spurious_apic_entry(SPURIOUS_APIC_VECTOR);
-       __smp_spurious_interrupt();
-       trace_spurious_apic_exit(SPURIOUS_APIC_VECTOR);
+       trace_spurious_apic_entry(vector);
+       __smp_spurious_interrupt(vector);
+       trace_spurious_apic_exit(vector);
        exiting_irq();
 }
 
diff --git a/arch/x86/kernel/apic/htirq.c b/arch/x86/kernel/apic/htirq.c
new file mode 100644 (file)
index 0000000..816f36e
--- /dev/null
@@ -0,0 +1,107 @@
+/*
+ * Support Hypertransport IRQ
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
+ *     Moved from arch/x86/kernel/apic/io_apic.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/htirq.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/hypertransport.h>
+
+/*
+ * Hypertransport interrupt support
+ */
+static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
+{
+       struct ht_irq_msg msg;
+
+       fetch_ht_irq_msg(irq, &msg);
+
+       msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
+       msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
+
+       msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
+       msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
+
+       write_ht_irq_msg(irq, &msg);
+}
+
+static int
+ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
+{
+       struct irq_cfg *cfg = irqd_cfg(data);
+       unsigned int dest;
+       int ret;
+
+       ret = apic_set_affinity(data, mask, &dest);
+       if (ret)
+               return ret;
+
+       target_ht_irq(data->irq, dest, cfg->vector);
+       return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static struct irq_chip ht_irq_chip = {
+       .name                   = "PCI-HT",
+       .irq_mask               = mask_ht_irq,
+       .irq_unmask             = unmask_ht_irq,
+       .irq_ack                = apic_ack_edge,
+       .irq_set_affinity       = ht_set_affinity,
+       .irq_retrigger          = apic_retrigger_irq,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+};
+
+int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
+{
+       struct irq_cfg *cfg;
+       struct ht_irq_msg msg;
+       unsigned dest;
+       int err;
+
+       if (disable_apic)
+               return -ENXIO;
+
+       cfg = irq_cfg(irq);
+       err = assign_irq_vector(irq, cfg, apic->target_cpus());
+       if (err)
+               return err;
+
+       err = apic->cpu_mask_to_apicid_and(cfg->domain,
+                                          apic->target_cpus(), &dest);
+       if (err)
+               return err;
+
+       msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
+
+       msg.address_lo =
+               HT_IRQ_LOW_BASE |
+               HT_IRQ_LOW_DEST_ID(dest) |
+               HT_IRQ_LOW_VECTOR(cfg->vector) |
+               ((apic->irq_dest_mode == 0) ?
+                       HT_IRQ_LOW_DM_PHYSICAL :
+                       HT_IRQ_LOW_DM_LOGICAL) |
+               HT_IRQ_LOW_RQEOI_EDGE |
+               ((apic->irq_delivery_mode != dest_LowestPrio) ?
+                       HT_IRQ_LOW_MT_FIXED :
+                       HT_IRQ_LOW_MT_ARBITRATED) |
+               HT_IRQ_LOW_IRQ_MASKED;
+
+       write_ht_irq_msg(irq, &msg);
+
+       irq_set_chip_and_handler_name(irq, &ht_irq_chip,
+                                     handle_edge_irq, "edge");
+
+       dev_dbg(&dev->dev, "irq %d for HT\n", irq);
+
+       return 0;
+}
index a6745e7567298f0f7a6e989396282551803a587e..3f5f60406ab17659e294e725fbe18655c94048ac 100644 (file)
 #include <linux/module.h>
 #include <linux/syscore_ops.h>
 #include <linux/irqdomain.h>
-#include <linux/msi.h>
-#include <linux/htirq.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/jiffies.h>     /* time_after() */
 #include <linux/slab.h>
 #include <linux/bootmem.h>
-#include <linux/dmar.h>
-#include <linux/hpet.h>
 
 #include <asm/idle.h>
 #include <asm/io.h>
 #include <asm/dma.h>
 #include <asm/timer.h>
 #include <asm/i8259.h>
-#include <asm/msidef.h>
-#include <asm/hypertransport.h>
 #include <asm/setup.h>
 #include <asm/irq_remapping.h>
-#include <asm/hpet.h>
 #include <asm/hw_irq.h>
 
 #include <asm/apic.h>
 
-#define __apicdebuginit(type) static type __init
-
 #define        for_each_ioapic(idx)            \
        for ((idx) = 0; (idx) < nr_ioapics; (idx)++)
 #define        for_each_ioapic_reverse(idx)    \
@@ -74,7 +65,7 @@
                for_each_pin((idx), (pin))
 
 #define for_each_irq_pin(entry, head) \
-       for (entry = head; entry; entry = entry->next)
+       list_for_each_entry(entry, &head, list)
 
 /*
  *      Is the SiS APIC rmw bug present ?
@@ -83,7 +74,6 @@
 int sis_apic_bug = -1;
 
 static DEFINE_RAW_SPINLOCK(ioapic_lock);
-static DEFINE_RAW_SPINLOCK(vector_lock);
 static DEFINE_MUTEX(ioapic_mutex);
 static unsigned int ioapic_dynirq_base;
 static int ioapic_initialized;
@@ -112,6 +102,7 @@ static struct ioapic {
        struct ioapic_domain_cfg irqdomain_cfg;
        struct irq_domain *irqdomain;
        struct mp_pin_info *pin_info;
+       struct resource *iomem_res;
 } ioapics[MAX_IO_APICS];
 
 #define mpc_ioapic_ver(ioapic_idx)     ioapics[ioapic_idx].mp_config.apicver
@@ -205,8 +196,6 @@ static int __init parse_noapic(char *str)
 }
 early_param("noapic", parse_noapic);
 
-static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node);
-
 /* Will be called in mpparse/acpi/sfi codes for saving IRQ info */
 void mp_save_irq(struct mpc_intsrc *m)
 {
@@ -228,8 +217,8 @@ void mp_save_irq(struct mpc_intsrc *m)
 }
 
 struct irq_pin_list {
+       struct list_head list;
        int apic, pin;
-       struct irq_pin_list *next;
 };
 
 static struct irq_pin_list *alloc_irq_pin_list(int node)
@@ -237,7 +226,26 @@ static struct irq_pin_list *alloc_irq_pin_list(int node)
        return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
 }
 
-int __init arch_early_irq_init(void)
+static void alloc_ioapic_saved_registers(int idx)
+{
+       size_t size;
+
+       if (ioapics[idx].saved_registers)
+               return;
+
+       size = sizeof(struct IO_APIC_route_entry) * ioapics[idx].nr_registers;
+       ioapics[idx].saved_registers = kzalloc(size, GFP_KERNEL);
+       if (!ioapics[idx].saved_registers)
+               pr_err("IOAPIC %d: suspend/resume impossible!\n", idx);
+}
+
+static void free_ioapic_saved_registers(int idx)
+{
+       kfree(ioapics[idx].saved_registers);
+       ioapics[idx].saved_registers = NULL;
+}
+
+int __init arch_early_ioapic_init(void)
 {
        struct irq_cfg *cfg;
        int i, node = cpu_to_node(0);
@@ -245,13 +253,8 @@ int __init arch_early_irq_init(void)
        if (!nr_legacy_irqs())
                io_apic_irqs = ~0UL;
 
-       for_each_ioapic(i) {
-               ioapics[i].saved_registers =
-                       kzalloc(sizeof(struct IO_APIC_route_entry) *
-                               ioapics[i].nr_registers, GFP_KERNEL);
-               if (!ioapics[i].saved_registers)
-                       pr_err("IOAPIC %d: suspend/resume impossible!\n", i);
-       }
+       for_each_ioapic(i)
+               alloc_ioapic_saved_registers(i);
 
        /*
         * For legacy IRQ's, start with assigning irq0 to irq15 to
@@ -266,61 +269,6 @@ int __init arch_early_irq_init(void)
        return 0;
 }
 
-static inline struct irq_cfg *irq_cfg(unsigned int irq)
-{
-       return irq_get_chip_data(irq);
-}
-
-static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
-{
-       struct irq_cfg *cfg;
-
-       cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
-       if (!cfg)
-               return NULL;
-       if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
-               goto out_cfg;
-       if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
-               goto out_domain;
-       return cfg;
-out_domain:
-       free_cpumask_var(cfg->domain);
-out_cfg:
-       kfree(cfg);
-       return NULL;
-}
-
-static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
-{
-       if (!cfg)
-               return;
-       irq_set_chip_data(at, NULL);
-       free_cpumask_var(cfg->domain);
-       free_cpumask_var(cfg->old_domain);
-       kfree(cfg);
-}
-
-static struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
-{
-       int res = irq_alloc_desc_at(at, node);
-       struct irq_cfg *cfg;
-
-       if (res < 0) {
-               if (res != -EEXIST)
-                       return NULL;
-               cfg = irq_cfg(at);
-               if (cfg)
-                       return cfg;
-       }
-
-       cfg = alloc_irq_cfg(at, node);
-       if (cfg)
-               irq_set_chip_data(at, cfg);
-       else
-               irq_free_desc(at);
-       return cfg;
-}
-
 struct io_apic {
        unsigned int index;
        unsigned int unused[3];
@@ -445,15 +393,12 @@ static void ioapic_mask_entry(int apic, int pin)
  */
 static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
 {
-       struct irq_pin_list **last, *entry;
+       struct irq_pin_list *entry;
 
        /* don't allow duplicates */
-       last = &cfg->irq_2_pin;
-       for_each_irq_pin(entry, cfg->irq_2_pin) {
+       for_each_irq_pin(entry, cfg->irq_2_pin)
                if (entry->apic == apic && entry->pin == pin)
                        return 0;
-               last = &entry->next;
-       }
 
        entry = alloc_irq_pin_list(node);
        if (!entry) {
@@ -464,22 +409,19 @@ static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pi
        entry->apic = apic;
        entry->pin = pin;
 
-       *last = entry;
+       list_add_tail(&entry->list, &cfg->irq_2_pin);
        return 0;
 }
 
 static void __remove_pin_from_irq(struct irq_cfg *cfg, int apic, int pin)
 {
-       struct irq_pin_list **last, *entry;
+       struct irq_pin_list *tmp, *entry;
 
-       last = &cfg->irq_2_pin;
-       for_each_irq_pin(entry, cfg->irq_2_pin)
+       list_for_each_entry_safe(entry, tmp, &cfg->irq_2_pin, list)
                if (entry->apic == apic && entry->pin == pin) {
-                       *last = entry->next;
+                       list_del(&entry->list);
                        kfree(entry);
                        return;
-               } else {
-                       last = &entry->next;
                }
 }
 
@@ -559,7 +501,7 @@ static void mask_ioapic(struct irq_cfg *cfg)
 
 static void mask_ioapic_irq(struct irq_data *data)
 {
-       mask_ioapic(data->chip_data);
+       mask_ioapic(irqd_cfg(data));
 }
 
 static void __unmask_ioapic(struct irq_cfg *cfg)
@@ -578,7 +520,7 @@ static void unmask_ioapic(struct irq_cfg *cfg)
 
 static void unmask_ioapic_irq(struct irq_data *data)
 {
-       unmask_ioapic(data->chip_data);
+       unmask_ioapic(irqd_cfg(data));
 }
 
 /*
@@ -1164,8 +1106,7 @@ void mp_unmap_irq(int irq)
  * Find a specific PCI IRQ entry.
  * Not an __init, possibly needed by modules
  */
-int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
-                               struct io_apic_irq_attr *irq_attr)
+int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
 {
        int irq, i, best_ioapic = -1, best_idx = -1;
 
@@ -1219,195 +1160,11 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
                return -1;
 
 out:
-       irq = pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq,
-                       IOAPIC_MAP_ALLOC);
-       if (irq > 0)
-               set_io_apic_irq_attr(irq_attr, best_ioapic,
-                                    mp_irqs[best_idx].dstirq,
-                                    irq_trigger(best_idx),
-                                    irq_polarity(best_idx));
-       return irq;
+       return pin_2_irq(best_idx, best_ioapic, mp_irqs[best_idx].dstirq,
+                        IOAPIC_MAP_ALLOC);
 }
 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
 
-void lock_vector_lock(void)
-{
-       /* Used to the online set of cpus does not change
-        * during assign_irq_vector.
-        */
-       raw_spin_lock(&vector_lock);
-}
-
-void unlock_vector_lock(void)
-{
-       raw_spin_unlock(&vector_lock);
-}
-
-static int
-__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
-{
-       /*
-        * NOTE! The local APIC isn't very good at handling
-        * multiple interrupts at the same interrupt level.
-        * As the interrupt level is determined by taking the
-        * vector number and shifting that right by 4, we
-        * want to spread these out a bit so that they don't
-        * all fall in the same interrupt level.
-        *
-        * Also, we've got to be careful not to trash gate
-        * 0x80, because int 0x80 is hm, kind of importantish. ;)
-        */
-       static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
-       static int current_offset = VECTOR_OFFSET_START % 16;
-       int cpu, err;
-       cpumask_var_t tmp_mask;
-
-       if (cfg->move_in_progress)
-               return -EBUSY;
-
-       if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
-               return -ENOMEM;
-
-       /* Only try and allocate irqs on cpus that are present */
-       err = -ENOSPC;
-       cpumask_clear(cfg->old_domain);
-       cpu = cpumask_first_and(mask, cpu_online_mask);
-       while (cpu < nr_cpu_ids) {
-               int new_cpu, vector, offset;
-
-               apic->vector_allocation_domain(cpu, tmp_mask, mask);
-
-               if (cpumask_subset(tmp_mask, cfg->domain)) {
-                       err = 0;
-                       if (cpumask_equal(tmp_mask, cfg->domain))
-                               break;
-                       /*
-                        * New cpumask using the vector is a proper subset of
-                        * the current in use mask. So cleanup the vector
-                        * allocation for the members that are not used anymore.
-                        */
-                       cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
-                       cfg->move_in_progress =
-                          cpumask_intersects(cfg->old_domain, cpu_online_mask);
-                       cpumask_and(cfg->domain, cfg->domain, tmp_mask);
-                       break;
-               }
-
-               vector = current_vector;
-               offset = current_offset;
-next:
-               vector += 16;
-               if (vector >= first_system_vector) {
-                       offset = (offset + 1) % 16;
-                       vector = FIRST_EXTERNAL_VECTOR + offset;
-               }
-
-               if (unlikely(current_vector == vector)) {
-                       cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
-                       cpumask_andnot(tmp_mask, mask, cfg->old_domain);
-                       cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
-                       continue;
-               }
-
-               if (test_bit(vector, used_vectors))
-                       goto next;
-
-               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
-                       if (per_cpu(vector_irq, new_cpu)[vector] > VECTOR_UNDEFINED)
-                               goto next;
-               }
-               /* Found one! */
-               current_vector = vector;
-               current_offset = offset;
-               if (cfg->vector) {
-                       cpumask_copy(cfg->old_domain, cfg->domain);
-                       cfg->move_in_progress =
-                          cpumask_intersects(cfg->old_domain, cpu_online_mask);
-               }
-               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
-                       per_cpu(vector_irq, new_cpu)[vector] = irq;
-               cfg->vector = vector;
-               cpumask_copy(cfg->domain, tmp_mask);
-               err = 0;
-               break;
-       }
-       free_cpumask_var(tmp_mask);
-       return err;
-}
-
-int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
-{
-       int err;
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&vector_lock, flags);
-       err = __assign_irq_vector(irq, cfg, mask);
-       raw_spin_unlock_irqrestore(&vector_lock, flags);
-       return err;
-}
-
-static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
-{
-       int cpu, vector;
-
-       BUG_ON(!cfg->vector);
-
-       vector = cfg->vector;
-       for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
-               per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
-
-       cfg->vector = 0;
-       cpumask_clear(cfg->domain);
-
-       if (likely(!cfg->move_in_progress))
-               return;
-       for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
-               for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-                       if (per_cpu(vector_irq, cpu)[vector] != irq)
-                               continue;
-                       per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
-                       break;
-               }
-       }
-       cfg->move_in_progress = 0;
-}
-
-void __setup_vector_irq(int cpu)
-{
-       /* Initialize vector_irq on a new cpu */
-       int irq, vector;
-       struct irq_cfg *cfg;
-
-       /*
-        * vector_lock will make sure that we don't run into irq vector
-        * assignments that might be happening on another cpu in parallel,
-        * while we setup our initial vector to irq mappings.
-        */
-       raw_spin_lock(&vector_lock);
-       /* Mark the inuse vectors */
-       for_each_active_irq(irq) {
-               cfg = irq_cfg(irq);
-               if (!cfg)
-                       continue;
-
-               if (!cpumask_test_cpu(cpu, cfg->domain))
-                       continue;
-               vector = cfg->vector;
-               per_cpu(vector_irq, cpu)[vector] = irq;
-       }
-       /* Mark the free vectors */
-       for (vector = 0; vector < NR_VECTORS; ++vector) {
-               irq = per_cpu(vector_irq, cpu)[vector];
-               if (irq <= VECTOR_UNDEFINED)
-                       continue;
-
-               cfg = irq_cfg(irq);
-               if (!cpumask_test_cpu(cpu, cfg->domain))
-                       per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
-       }
-       raw_spin_unlock(&vector_lock);
-}
-
 static struct irq_chip ioapic_chip;
 
 #ifdef CONFIG_X86_32
@@ -1496,7 +1253,7 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
                                         &dest)) {
                pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n",
                        mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
-               __clear_irq_vector(irq, cfg);
+               clear_irq_vector(irq, cfg);
 
                return;
        }
@@ -1510,7 +1267,7 @@ static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
        if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) {
                pr_warn("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
                        mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
-               __clear_irq_vector(irq, cfg);
+               clear_irq_vector(irq, cfg);
 
                return;
        }
@@ -1641,7 +1398,7 @@ void ioapic_zap_locks(void)
        raw_spin_lock_init(&ioapic_lock);
 }
 
-__apicdebuginit(void) print_IO_APIC(int ioapic_idx)
+static void __init print_IO_APIC(int ioapic_idx)
 {
        union IO_APIC_reg_00 reg_00;
        union IO_APIC_reg_01 reg_01;
@@ -1698,7 +1455,7 @@ __apicdebuginit(void) print_IO_APIC(int ioapic_idx)
        x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries);
 }
 
-__apicdebuginit(void) print_IO_APICs(void)
+void __init print_IO_APICs(void)
 {
        int ioapic_idx;
        struct irq_cfg *cfg;
@@ -1731,8 +1488,7 @@ __apicdebuginit(void) print_IO_APICs(void)
                cfg = irq_cfg(irq);
                if (!cfg)
                        continue;
-               entry = cfg->irq_2_pin;
-               if (!entry)
+               if (list_empty(&cfg->irq_2_pin))
                        continue;
                printk(KERN_DEBUG "IRQ%d ", irq);
                for_each_irq_pin(entry, cfg->irq_2_pin)
@@ -1743,205 +1499,6 @@ __apicdebuginit(void) print_IO_APICs(void)
        printk(KERN_INFO ".................................... done.\n");
 }
 
-__apicdebuginit(void) print_APIC_field(int base)
-{
-       int i;
-
-       printk(KERN_DEBUG);
-
-       for (i = 0; i < 8; i++)
-               pr_cont("%08x", apic_read(base + i*0x10));
-
-       pr_cont("\n");
-}
-
-__apicdebuginit(void) print_local_APIC(void *dummy)
-{
-       unsigned int i, v, ver, maxlvt;
-       u64 icr;
-
-       printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
-               smp_processor_id(), hard_smp_processor_id());
-       v = apic_read(APIC_ID);
-       printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
-       v = apic_read(APIC_LVR);
-       printk(KERN_INFO "... APIC VERSION: %08x\n", v);
-       ver = GET_APIC_VERSION(v);
-       maxlvt = lapic_get_maxlvt();
-
-       v = apic_read(APIC_TASKPRI);
-       printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
-
-       if (APIC_INTEGRATED(ver)) {                     /* !82489DX */
-               if (!APIC_XAPIC(ver)) {
-                       v = apic_read(APIC_ARBPRI);
-                       printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
-                              v & APIC_ARBPRI_MASK);
-               }
-               v = apic_read(APIC_PROCPRI);
-               printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
-       }
-
-       /*
-        * Remote read supported only in the 82489DX and local APIC for
-        * Pentium processors.
-        */
-       if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
-               v = apic_read(APIC_RRR);
-               printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
-       }
-
-       v = apic_read(APIC_LDR);
-       printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
-       if (!x2apic_enabled()) {
-               v = apic_read(APIC_DFR);
-               printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
-       }
-       v = apic_read(APIC_SPIV);
-       printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
-
-       printk(KERN_DEBUG "... APIC ISR field:\n");
-       print_APIC_field(APIC_ISR);
-       printk(KERN_DEBUG "... APIC TMR field:\n");
-       print_APIC_field(APIC_TMR);
-       printk(KERN_DEBUG "... APIC IRR field:\n");
-       print_APIC_field(APIC_IRR);
-
-       if (APIC_INTEGRATED(ver)) {             /* !82489DX */
-               if (maxlvt > 3)         /* Due to the Pentium erratum 3AP. */
-                       apic_write(APIC_ESR, 0);
-
-               v = apic_read(APIC_ESR);
-               printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
-       }
-
-       icr = apic_icr_read();
-       printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
-       printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
-
-       v = apic_read(APIC_LVTT);
-       printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
-
-       if (maxlvt > 3) {                       /* PC is LVT#4. */
-               v = apic_read(APIC_LVTPC);
-               printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
-       }
-       v = apic_read(APIC_LVT0);
-       printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
-       v = apic_read(APIC_LVT1);
-       printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
-
-       if (maxlvt > 2) {                       /* ERR is LVT#3. */
-               v = apic_read(APIC_LVTERR);
-               printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
-       }
-
-       v = apic_read(APIC_TMICT);
-       printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
-       v = apic_read(APIC_TMCCT);
-       printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
-       v = apic_read(APIC_TDCR);
-       printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
-
-       if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
-               v = apic_read(APIC_EFEAT);
-               maxlvt = (v >> 16) & 0xff;
-               printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
-               v = apic_read(APIC_ECTRL);
-               printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
-               for (i = 0; i < maxlvt; i++) {
-                       v = apic_read(APIC_EILVTn(i));
-                       printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
-               }
-       }
-       pr_cont("\n");
-}
-
-__apicdebuginit(void) print_local_APICs(int maxcpu)
-{
-       int cpu;
-
-       if (!maxcpu)
-               return;
-
-       preempt_disable();
-       for_each_online_cpu(cpu) {
-               if (cpu >= maxcpu)
-                       break;
-               smp_call_function_single(cpu, print_local_APIC, NULL, 1);
-       }
-       preempt_enable();
-}
-
-__apicdebuginit(void) print_PIC(void)
-{
-       unsigned int v;
-       unsigned long flags;
-
-       if (!nr_legacy_irqs())
-               return;
-
-       printk(KERN_DEBUG "\nprinting PIC contents\n");
-
-       raw_spin_lock_irqsave(&i8259A_lock, flags);
-
-       v = inb(0xa1) << 8 | inb(0x21);
-       printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);
-
-       v = inb(0xa0) << 8 | inb(0x20);
-       printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);
-
-       outb(0x0b,0xa0);
-       outb(0x0b,0x20);
-       v = inb(0xa0) << 8 | inb(0x20);
-       outb(0x0a,0xa0);
-       outb(0x0a,0x20);
-
-       raw_spin_unlock_irqrestore(&i8259A_lock, flags);
-
-       printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);
-
-       v = inb(0x4d1) << 8 | inb(0x4d0);
-       printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
-}
-
-static int __initdata show_lapic = 1;
-static __init int setup_show_lapic(char *arg)
-{
-       int num = -1;
-
-       if (strcmp(arg, "all") == 0) {
-               show_lapic = CONFIG_NR_CPUS;
-       } else {
-               get_option(&arg, &num);
-               if (num >= 0)
-                       show_lapic = num;
-       }
-
-       return 1;
-}
-__setup("show_lapic=", setup_show_lapic);
-
-__apicdebuginit(int) print_ICs(void)
-{
-       if (apic_verbosity == APIC_QUIET)
-               return 0;
-
-       print_PIC();
-
-       /* don't print out if apic is not there */
-       if (!cpu_has_apic && !apic_from_smp_config())
-               return 0;
-
-       print_local_APICs(show_lapic);
-       print_IO_APICs();
-
-       return 0;
-}
-
-late_initcall(print_ICs);
-
-
 /* Where if anywhere is the i8259 connect in external int mode */
 static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
 
@@ -2244,26 +1801,12 @@ static unsigned int startup_ioapic_irq(struct irq_data *data)
                if (legacy_pic->irq_pending(irq))
                        was_pending = 1;
        }
-       __unmask_ioapic(data->chip_data);
+       __unmask_ioapic(irqd_cfg(data));
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
        return was_pending;
 }
 
-static int ioapic_retrigger_irq(struct irq_data *data)
-{
-       struct irq_cfg *cfg = data->chip_data;
-       unsigned long flags;
-       int cpu;
-
-       raw_spin_lock_irqsave(&vector_lock, flags);
-       cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
-       apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
-       raw_spin_unlock_irqrestore(&vector_lock, flags);
-
-       return 1;
-}
-
 /*
  * Level and edge triggered IO-APIC interrupts need different handling,
  * so we use two separate IRQ descriptors. Edge triggered IRQs can be
@@ -2273,113 +1816,6 @@ static int ioapic_retrigger_irq(struct irq_data *data)
  * races.
  */
 
-#ifdef CONFIG_SMP
-void send_cleanup_vector(struct irq_cfg *cfg)
-{
-       cpumask_var_t cleanup_mask;
-
-       if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
-               unsigned int i;
-               for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
-                       apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
-       } else {
-               cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
-               apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
-               free_cpumask_var(cleanup_mask);
-       }
-       cfg->move_in_progress = 0;
-}
-
-asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
-{
-       unsigned vector, me;
-
-       ack_APIC_irq();
-       irq_enter();
-       exit_idle();
-
-       me = smp_processor_id();
-       for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
-               int irq;
-               unsigned int irr;
-               struct irq_desc *desc;
-               struct irq_cfg *cfg;
-               irq = __this_cpu_read(vector_irq[vector]);
-
-               if (irq <= VECTOR_UNDEFINED)
-                       continue;
-
-               desc = irq_to_desc(irq);
-               if (!desc)
-                       continue;
-
-               cfg = irq_cfg(irq);
-               if (!cfg)
-                       continue;
-
-               raw_spin_lock(&desc->lock);
-
-               /*
-                * Check if the irq migration is in progress. If so, we
-                * haven't received the cleanup request yet for this irq.
-                */
-               if (cfg->move_in_progress)
-                       goto unlock;
-
-               if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
-                       goto unlock;
-
-               irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
-               /*
-                * Check if the vector that needs to be cleanedup is
-                * registered at the cpu's IRR. If so, then this is not
-                * the best time to clean it up. Lets clean it up in the
-                * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
-                * to myself.
-                */
-               if (irr  & (1 << (vector % 32))) {
-                       apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
-                       goto unlock;
-               }
-               __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
-unlock:
-               raw_spin_unlock(&desc->lock);
-       }
-
-       irq_exit();
-}
-
-static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
-{
-       unsigned me;
-
-       if (likely(!cfg->move_in_progress))
-               return;
-
-       me = smp_processor_id();
-
-       if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
-               send_cleanup_vector(cfg);
-}
-
-static void irq_complete_move(struct irq_cfg *cfg)
-{
-       __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
-}
-
-void irq_force_complete_move(int irq)
-{
-       struct irq_cfg *cfg = irq_cfg(irq);
-
-       if (!cfg)
-               return;
-
-       __irq_complete_move(cfg, cfg->vector);
-}
-#else
-static inline void irq_complete_move(struct irq_cfg *cfg) { }
-#endif
-
 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
 {
        int apic, pin;
@@ -2400,71 +1836,29 @@ static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq
        }
 }
 
-/*
- * Either sets data->affinity to a valid value, and returns
- * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
- * leaves data->affinity untouched.
- */
-int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-                         unsigned int *dest_id)
+int native_ioapic_set_affinity(struct irq_data *data,
+                              const struct cpumask *mask,
+                              bool force)
 {
-       struct irq_cfg *cfg = data->chip_data;
-       unsigned int irq = data->irq;
-       int err;
-
-       if (!config_enabled(CONFIG_SMP))
-               return -EPERM;
-
-       if (!cpumask_intersects(mask, cpu_online_mask))
-               return -EINVAL;
-
-       err = assign_irq_vector(irq, cfg, mask);
-       if (err)
-               return err;
-
-       err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
-       if (err) {
-               if (assign_irq_vector(irq, cfg, data->affinity))
-                       pr_err("Failed to recover vector for irq %d\n", irq);
-               return err;
-       }
-
-       cpumask_copy(data->affinity, mask);
-
-       return 0;
-}
-
-
-int native_ioapic_set_affinity(struct irq_data *data,
-                              const struct cpumask *mask,
-                              bool force)
-{
-       unsigned int dest, irq = data->irq;
-       unsigned long flags;
-       int ret;
+       unsigned int dest, irq = data->irq;
+       unsigned long flags;
+       int ret;
 
        if (!config_enabled(CONFIG_SMP))
                return -EPERM;
 
        raw_spin_lock_irqsave(&ioapic_lock, flags);
-       ret = __ioapic_set_affinity(data, mask, &dest);
+       ret = apic_set_affinity(data, mask, &dest);
        if (!ret) {
                /* Only the high 8 bits are valid. */
                dest = SET_APIC_LOGICAL_ID(dest);
-               __target_IO_APIC_irq(irq, dest, data->chip_data);
+               __target_IO_APIC_irq(irq, dest, irqd_cfg(data));
                ret = IRQ_SET_MASK_OK_NOCOPY;
        }
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
        return ret;
 }
 
-static void ack_apic_edge(struct irq_data *data)
-{
-       irq_complete_move(data->chip_data);
-       irq_move_irq(data);
-       ack_APIC_irq();
-}
-
 atomic_t irq_mis_count;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -2547,9 +1941,9 @@ static inline void ioapic_irqd_unmask(struct irq_data *data,
 }
 #endif
 
-static void ack_apic_level(struct irq_data *data)
+static void ack_ioapic_level(struct irq_data *data)
 {
-       struct irq_cfg *cfg = data->chip_data;
+       struct irq_cfg *cfg = irqd_cfg(data);
        int i, irq = data->irq;
        unsigned long v;
        bool masked;
@@ -2619,10 +2013,10 @@ static struct irq_chip ioapic_chip __read_mostly = {
        .irq_startup            = startup_ioapic_irq,
        .irq_mask               = mask_ioapic_irq,
        .irq_unmask             = unmask_ioapic_irq,
-       .irq_ack                = ack_apic_edge,
-       .irq_eoi                = ack_apic_level,
+       .irq_ack                = apic_ack_edge,
+       .irq_eoi                = ack_ioapic_level,
        .irq_set_affinity       = native_ioapic_set_affinity,
-       .irq_retrigger          = ioapic_retrigger_irq,
+       .irq_retrigger          = apic_retrigger_irq,
        .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -2965,6 +2359,16 @@ static int mp_irqdomain_create(int ioapic)
        return 0;
 }
 
+static void ioapic_destroy_irqdomain(int idx)
+{
+       if (ioapics[idx].irqdomain) {
+               irq_domain_remove(ioapics[idx].irqdomain);
+               ioapics[idx].irqdomain = NULL;
+       }
+       kfree(ioapics[idx].pin_info);
+       ioapics[idx].pin_info = NULL;
+}
+
 void __init setup_IO_APIC(void)
 {
        int ioapic;
@@ -3044,399 +2448,6 @@ static int __init ioapic_init_ops(void)
 
 device_initcall(ioapic_init_ops);
 
-/*
- * Dynamic irq allocate and deallocation. Should be replaced by irq domains!
- */
-int arch_setup_hwirq(unsigned int irq, int node)
-{
-       struct irq_cfg *cfg;
-       unsigned long flags;
-       int ret;
-
-       cfg = alloc_irq_cfg(irq, node);
-       if (!cfg)
-               return -ENOMEM;
-
-       raw_spin_lock_irqsave(&vector_lock, flags);
-       ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
-       raw_spin_unlock_irqrestore(&vector_lock, flags);
-
-       if (!ret)
-               irq_set_chip_data(irq, cfg);
-       else
-               free_irq_cfg(irq, cfg);
-       return ret;
-}
-
-void arch_teardown_hwirq(unsigned int irq)
-{
-       struct irq_cfg *cfg = irq_cfg(irq);
-       unsigned long flags;
-
-       free_remapped_irq(irq);
-       raw_spin_lock_irqsave(&vector_lock, flags);
-       __clear_irq_vector(irq, cfg);
-       raw_spin_unlock_irqrestore(&vector_lock, flags);
-       free_irq_cfg(irq, cfg);
-}
-
-/*
- * MSI message composition
- */
-void native_compose_msi_msg(struct pci_dev *pdev,
-                           unsigned int irq, unsigned int dest,
-                           struct msi_msg *msg, u8 hpet_id)
-{
-       struct irq_cfg *cfg = irq_cfg(irq);
-
-       msg->address_hi = MSI_ADDR_BASE_HI;
-
-       if (x2apic_enabled())
-               msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);
-
-       msg->address_lo =
-               MSI_ADDR_BASE_LO |
-               ((apic->irq_dest_mode == 0) ?
-                       MSI_ADDR_DEST_MODE_PHYSICAL:
-                       MSI_ADDR_DEST_MODE_LOGICAL) |
-               ((apic->irq_delivery_mode != dest_LowestPrio) ?
-                       MSI_ADDR_REDIRECTION_CPU:
-                       MSI_ADDR_REDIRECTION_LOWPRI) |
-               MSI_ADDR_DEST_ID(dest);
-
-       msg->data =
-               MSI_DATA_TRIGGER_EDGE |
-               MSI_DATA_LEVEL_ASSERT |
-               ((apic->irq_delivery_mode != dest_LowestPrio) ?
-                       MSI_DATA_DELIVERY_FIXED:
-                       MSI_DATA_DELIVERY_LOWPRI) |
-               MSI_DATA_VECTOR(cfg->vector);
-}
-
-#ifdef CONFIG_PCI_MSI
-static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
-                          struct msi_msg *msg, u8 hpet_id)
-{
-       struct irq_cfg *cfg;
-       int err;
-       unsigned dest;
-
-       if (disable_apic)
-               return -ENXIO;
-
-       cfg = irq_cfg(irq);
-       err = assign_irq_vector(irq, cfg, apic->target_cpus());
-       if (err)
-               return err;
-
-       err = apic->cpu_mask_to_apicid_and(cfg->domain,
-                                          apic->target_cpus(), &dest);
-       if (err)
-               return err;
-
-       x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);
-
-       return 0;
-}
-
-static int
-msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
-{
-       struct irq_cfg *cfg = data->chip_data;
-       struct msi_msg msg;
-       unsigned int dest;
-       int ret;
-
-       ret = __ioapic_set_affinity(data, mask, &dest);
-       if (ret)
-               return ret;
-
-       __get_cached_msi_msg(data->msi_desc, &msg);
-
-       msg.data &= ~MSI_DATA_VECTOR_MASK;
-       msg.data |= MSI_DATA_VECTOR(cfg->vector);
-       msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
-       msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-
-       __pci_write_msi_msg(data->msi_desc, &msg);
-
-       return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-/*
- * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
- * which implement the MSI or MSI-X Capability Structure.
- */
-static struct irq_chip msi_chip = {
-       .name                   = "PCI-MSI",
-       .irq_unmask             = pci_msi_unmask_irq,
-       .irq_mask               = pci_msi_mask_irq,
-       .irq_ack                = ack_apic_edge,
-       .irq_set_affinity       = msi_set_affinity,
-       .irq_retrigger          = ioapic_retrigger_irq,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
-};
-
-int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
-                 unsigned int irq_base, unsigned int irq_offset)
-{
-       struct irq_chip *chip = &msi_chip;
-       struct msi_msg msg;
-       unsigned int irq = irq_base + irq_offset;
-       int ret;
-
-       ret = msi_compose_msg(dev, irq, &msg, -1);
-       if (ret < 0)
-               return ret;
-
-       irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
-
-       /*
-        * MSI-X message is written per-IRQ, the offset is always 0.
-        * MSI message denotes a contiguous group of IRQs, written for 0th IRQ.
-        */
-       if (!irq_offset)
-               pci_write_msi_msg(irq, &msg);
-
-       setup_remapped_irq(irq, irq_cfg(irq), chip);
-
-       irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
-
-       dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
-
-       return 0;
-}
-
-int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
-{
-       struct msi_desc *msidesc;
-       unsigned int irq;
-       int node, ret;
-
-       /* Multiple MSI vectors only supported with interrupt remapping */
-       if (type == PCI_CAP_ID_MSI && nvec > 1)
-               return 1;
-
-       node = dev_to_node(&dev->dev);
-
-       list_for_each_entry(msidesc, &dev->msi_list, list) {
-               irq = irq_alloc_hwirq(node);
-               if (!irq)
-                       return -ENOSPC;
-
-               ret = setup_msi_irq(dev, msidesc, irq, 0);
-               if (ret < 0) {
-                       irq_free_hwirq(irq);
-                       return ret;
-               }
-
-       }
-       return 0;
-}
-
-void native_teardown_msi_irq(unsigned int irq)
-{
-       irq_free_hwirq(irq);
-}
-
-#ifdef CONFIG_DMAR_TABLE
-static int
-dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
-                     bool force)
-{
-       struct irq_cfg *cfg = data->chip_data;
-       unsigned int dest, irq = data->irq;
-       struct msi_msg msg;
-       int ret;
-
-       ret = __ioapic_set_affinity(data, mask, &dest);
-       if (ret)
-               return ret;
-
-       dmar_msi_read(irq, &msg);
-
-       msg.data &= ~MSI_DATA_VECTOR_MASK;
-       msg.data |= MSI_DATA_VECTOR(cfg->vector);
-       msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
-       msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-       msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
-
-       dmar_msi_write(irq, &msg);
-
-       return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-static struct irq_chip dmar_msi_type = {
-       .name                   = "DMAR_MSI",
-       .irq_unmask             = dmar_msi_unmask,
-       .irq_mask               = dmar_msi_mask,
-       .irq_ack                = ack_apic_edge,
-       .irq_set_affinity       = dmar_msi_set_affinity,
-       .irq_retrigger          = ioapic_retrigger_irq,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
-};
-
-int arch_setup_dmar_msi(unsigned int irq)
-{
-       int ret;
-       struct msi_msg msg;
-
-       ret = msi_compose_msg(NULL, irq, &msg, -1);
-       if (ret < 0)
-               return ret;
-       dmar_msi_write(irq, &msg);
-       irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
-                                     "edge");
-       return 0;
-}
-#endif
-
-#ifdef CONFIG_HPET_TIMER
-
-static int hpet_msi_set_affinity(struct irq_data *data,
-                                const struct cpumask *mask, bool force)
-{
-       struct irq_cfg *cfg = data->chip_data;
-       struct msi_msg msg;
-       unsigned int dest;
-       int ret;
-
-       ret = __ioapic_set_affinity(data, mask, &dest);
-       if (ret)
-               return ret;
-
-       hpet_msi_read(data->handler_data, &msg);
-
-       msg.data &= ~MSI_DATA_VECTOR_MASK;
-       msg.data |= MSI_DATA_VECTOR(cfg->vector);
-       msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
-       msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-
-       hpet_msi_write(data->handler_data, &msg);
-
-       return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-static struct irq_chip hpet_msi_type = {
-       .name = "HPET_MSI",
-       .irq_unmask = hpet_msi_unmask,
-       .irq_mask = hpet_msi_mask,
-       .irq_ack = ack_apic_edge,
-       .irq_set_affinity = hpet_msi_set_affinity,
-       .irq_retrigger = ioapic_retrigger_irq,
-       .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-int default_setup_hpet_msi(unsigned int irq, unsigned int id)
-{
-       struct irq_chip *chip = &hpet_msi_type;
-       struct msi_msg msg;
-       int ret;
-
-       ret = msi_compose_msg(NULL, irq, &msg, id);
-       if (ret < 0)
-               return ret;
-
-       hpet_msi_write(irq_get_handler_data(irq), &msg);
-       irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
-       setup_remapped_irq(irq, irq_cfg(irq), chip);
-
-       irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
-       return 0;
-}
-#endif
-
-#endif /* CONFIG_PCI_MSI */
-/*
- * Hypertransport interrupt support
- */
-#ifdef CONFIG_HT_IRQ
-
-static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
-{
-       struct ht_irq_msg msg;
-       fetch_ht_irq_msg(irq, &msg);
-
-       msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
-       msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
-
-       msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
-       msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
-
-       write_ht_irq_msg(irq, &msg);
-}
-
-static int
-ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
-{
-       struct irq_cfg *cfg = data->chip_data;
-       unsigned int dest;
-       int ret;
-
-       ret = __ioapic_set_affinity(data, mask, &dest);
-       if (ret)
-               return ret;
-
-       target_ht_irq(data->irq, dest, cfg->vector);
-       return IRQ_SET_MASK_OK_NOCOPY;
-}
-
-static struct irq_chip ht_irq_chip = {
-       .name                   = "PCI-HT",
-       .irq_mask               = mask_ht_irq,
-       .irq_unmask             = unmask_ht_irq,
-       .irq_ack                = ack_apic_edge,
-       .irq_set_affinity       = ht_set_affinity,
-       .irq_retrigger          = ioapic_retrigger_irq,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
-};
-
-int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
-{
-       struct irq_cfg *cfg;
-       struct ht_irq_msg msg;
-       unsigned dest;
-       int err;
-
-       if (disable_apic)
-               return -ENXIO;
-
-       cfg = irq_cfg(irq);
-       err = assign_irq_vector(irq, cfg, apic->target_cpus());
-       if (err)
-               return err;
-
-       err = apic->cpu_mask_to_apicid_and(cfg->domain,
-                                          apic->target_cpus(), &dest);
-       if (err)
-               return err;
-
-       msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
-
-       msg.address_lo =
-               HT_IRQ_LOW_BASE |
-               HT_IRQ_LOW_DEST_ID(dest) |
-               HT_IRQ_LOW_VECTOR(cfg->vector) |
-               ((apic->irq_dest_mode == 0) ?
-                       HT_IRQ_LOW_DM_PHYSICAL :
-                       HT_IRQ_LOW_DM_LOGICAL) |
-               HT_IRQ_LOW_RQEOI_EDGE |
-               ((apic->irq_delivery_mode != dest_LowestPrio) ?
-                       HT_IRQ_LOW_MT_FIXED :
-                       HT_IRQ_LOW_MT_ARBITRATED) |
-               HT_IRQ_LOW_IRQ_MASKED;
-
-       write_ht_irq_msg(irq, &msg);
-
-       irq_set_chip_and_handler_name(irq, &ht_irq_chip,
-                                     handle_edge_irq, "edge");
-
-       dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
-
-       return 0;
-}
-#endif /* CONFIG_HT_IRQ */
-
 static int
 io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
 {
@@ -3451,7 +2462,7 @@ io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
        return ret;
 }
 
-static int __init io_apic_get_redir_entries(int ioapic)
+static int io_apic_get_redir_entries(int ioapic)
 {
        union IO_APIC_reg_01    reg_01;
        unsigned long flags;
@@ -3476,28 +2487,8 @@ unsigned int arch_dynirq_lower_bound(unsigned int from)
        return ioapic_initialized ? ioapic_dynirq_base : gsi_top;
 }
 
-int __init arch_probe_nr_irqs(void)
-{
-       int nr;
-
-       if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
-               nr_irqs = NR_VECTORS * nr_cpu_ids;
-
-       nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
-#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
-       /*
-        * for MSI and HT dyn irq
-        */
-       nr += gsi_top * 16;
-#endif
-       if (nr < nr_irqs)
-               nr_irqs = nr;
-
-       return 0;
-}
-
 #ifdef CONFIG_X86_32
-static int __init io_apic_get_unique_id(int ioapic, int apic_id)
+static int io_apic_get_unique_id(int ioapic, int apic_id)
 {
        union IO_APIC_reg_00 reg_00;
        static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
@@ -3572,30 +2563,63 @@ static int __init io_apic_get_unique_id(int ioapic, int apic_id)
        return apic_id;
 }
 
-static u8 __init io_apic_unique_id(u8 id)
+static u8 io_apic_unique_id(int idx, u8 id)
 {
        if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
            !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
-               return io_apic_get_unique_id(nr_ioapics, id);
+               return io_apic_get_unique_id(idx, id);
        else
                return id;
 }
 #else
-static u8 __init io_apic_unique_id(u8 id)
+static u8 io_apic_unique_id(int idx, u8 id)
 {
-       int i;
+       union IO_APIC_reg_00 reg_00;
        DECLARE_BITMAP(used, 256);
+       unsigned long flags;
+       u8 new_id;
+       int i;
 
        bitmap_zero(used, 256);
        for_each_ioapic(i)
                __set_bit(mpc_ioapic_id(i), used);
+
+       /* Hand out the requested id if available */
        if (!test_bit(id, used))
                return id;
-       return find_first_zero_bit(used, 256);
+
+       /*
+        * Read the current id from the ioapic and keep it if
+        * available.
+        */
+       raw_spin_lock_irqsave(&ioapic_lock, flags);
+       reg_00.raw = io_apic_read(idx, 0);
+       raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+       new_id = reg_00.bits.ID;
+       if (!test_bit(new_id, used)) {
+               apic_printk(APIC_VERBOSE, KERN_INFO
+                       "IOAPIC[%d]: Using reg apic_id %d instead of %d\n",
+                        idx, new_id, id);
+               return new_id;
+       }
+
+       /*
+        * Get the next free id and write it to the ioapic.
+        */
+       new_id = find_first_zero_bit(used, 256);
+       reg_00.bits.ID = new_id;
+       raw_spin_lock_irqsave(&ioapic_lock, flags);
+       io_apic_write(idx, 0, reg_00.raw);
+       reg_00.raw = io_apic_read(idx, 0);
+       raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+       /* Sanity check */
+       BUG_ON(reg_00.bits.ID != new_id);
+
+       return new_id;
 }
 #endif
 
-static int __init io_apic_get_version(int ioapic)
+static int io_apic_get_version(int ioapic)
 {
        union IO_APIC_reg_01    reg_01;
        unsigned long flags;
@@ -3702,6 +2726,7 @@ static struct resource * __init ioapic_setup_resources(void)
                snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
                mem += IOAPIC_RESOURCE_NAME_SIZE;
                num++;
+               ioapics[i].iomem_res = res;
        }
 
        ioapic_resources = res;
@@ -3799,21 +2824,7 @@ int mp_find_ioapic_pin(int ioapic, u32 gsi)
        return gsi - gsi_cfg->gsi_base;
 }
 
-static __init int bad_ioapic(unsigned long address)
-{
-       if (nr_ioapics >= MAX_IO_APICS) {
-               pr_warn("WARNING: Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
-                       MAX_IO_APICS, nr_ioapics);
-               return 1;
-       }
-       if (!address) {
-               pr_warn("WARNING: Bogus (zero) I/O APIC address found in table, skipping!\n");
-               return 1;
-       }
-       return 0;
-}
-
-static __init int bad_ioapic_register(int idx)
+static int bad_ioapic_register(int idx)
 {
        union IO_APIC_reg_00 reg_00;
        union IO_APIC_reg_01 reg_01;
@@ -3832,32 +2843,61 @@ static __init int bad_ioapic_register(int idx)
        return 0;
 }
 
-void __init mp_register_ioapic(int id, u32 address, u32 gsi_base,
-                              struct ioapic_domain_cfg *cfg)
+static int find_free_ioapic_entry(void)
 {
-       int idx = 0;
-       int entries;
+       int idx;
+
+       for (idx = 0; idx < MAX_IO_APICS; idx++)
+               if (ioapics[idx].nr_registers == 0)
+                       return idx;
+
+       return MAX_IO_APICS;
+}
+
+/**
+ * mp_register_ioapic - Register an IOAPIC device
+ * @id:                hardware IOAPIC ID
+ * @address:   physical address of IOAPIC register area
+ * @gsi_base:  base of GSI associated with the IOAPIC
+ * @cfg:       configuration information for the IOAPIC
+ */
+int mp_register_ioapic(int id, u32 address, u32 gsi_base,
+                      struct ioapic_domain_cfg *cfg)
+{
+       bool hotplug = !!ioapic_initialized;
        struct mp_ioapic_gsi *gsi_cfg;
+       int idx, ioapic, entries;
+       u32 gsi_end;
 
-       if (bad_ioapic(address))
-               return;
+       if (!address) {
+               pr_warn("Bogus (zero) I/O APIC address found, skipping!\n");
+               return -EINVAL;
+       }
+       for_each_ioapic(ioapic)
+               if (ioapics[ioapic].mp_config.apicaddr == address) {
+                       pr_warn("address 0x%x conflicts with IOAPIC%d\n",
+                               address, ioapic);
+                       return -EEXIST;
+               }
 
-       idx = nr_ioapics;
+       idx = find_free_ioapic_entry();
+       if (idx >= MAX_IO_APICS) {
+               pr_warn("Max # of I/O APICs (%d) exceeded (found %d), skipping\n",
+                       MAX_IO_APICS, idx);
+               return -ENOSPC;
+       }
 
        ioapics[idx].mp_config.type = MP_IOAPIC;
        ioapics[idx].mp_config.flags = MPC_APIC_USABLE;
        ioapics[idx].mp_config.apicaddr = address;
-       ioapics[idx].irqdomain = NULL;
-       ioapics[idx].irqdomain_cfg = *cfg;
 
        set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
-
        if (bad_ioapic_register(idx)) {
                clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
-               return;
+               return -ENODEV;
        }
 
-       ioapics[idx].mp_config.apicid = io_apic_unique_id(id);
+       ioapics[idx].mp_config.apicid = io_apic_unique_id(idx, id);
        ioapics[idx].mp_config.apicver = io_apic_get_version(idx);
 
        /*
@@ -3865,24 +2905,112 @@ void __init mp_register_ioapic(int id, u32 address, u32 gsi_base,
         * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
         */
        entries = io_apic_get_redir_entries(idx);
+       gsi_end = gsi_base + entries - 1;
+       for_each_ioapic(ioapic) {
+               gsi_cfg = mp_ioapic_gsi_routing(ioapic);
+               if ((gsi_base >= gsi_cfg->gsi_base &&
+                    gsi_base <= gsi_cfg->gsi_end) ||
+                   (gsi_end >= gsi_cfg->gsi_base &&
+                    gsi_end <= gsi_cfg->gsi_end)) {
+                       pr_warn("GSI range [%u-%u] for new IOAPIC conflicts with GSI[%u-%u]\n",
+                               gsi_base, gsi_end,
+                               gsi_cfg->gsi_base, gsi_cfg->gsi_end);
+                       clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
+                       return -ENOSPC;
+               }
+       }
        gsi_cfg = mp_ioapic_gsi_routing(idx);
        gsi_cfg->gsi_base = gsi_base;
-       gsi_cfg->gsi_end = gsi_base + entries - 1;
+       gsi_cfg->gsi_end = gsi_end;
+
+       ioapics[idx].irqdomain = NULL;
+       ioapics[idx].irqdomain_cfg = *cfg;
 
        /*
-        * The number of IO-APIC IRQ registers (== #pins):
+        * If mp_register_ioapic() is called during early boot stage when
+        * walking ACPI/SFI/DT tables, it's too early to create irqdomain,
+        * we are still using bootmem allocator. So delay it to setup_IO_APIC().
         */
-       ioapics[idx].nr_registers = entries;
+       if (hotplug) {
+               if (mp_irqdomain_create(idx)) {
+                       clear_fixmap(FIX_IO_APIC_BASE_0 + idx);
+                       return -ENOMEM;
+               }
+               alloc_ioapic_saved_registers(idx);
+       }
 
        if (gsi_cfg->gsi_end >= gsi_top)
                gsi_top = gsi_cfg->gsi_end + 1;
+       if (nr_ioapics <= idx)
+               nr_ioapics = idx + 1;
+
+       /* Set nr_registers to mark entry present */
+       ioapics[idx].nr_registers = entries;
 
        pr_info("IOAPIC[%d]: apic_id %d, version %d, address 0x%x, GSI %d-%d\n",
                idx, mpc_ioapic_id(idx),
                mpc_ioapic_ver(idx), mpc_ioapic_addr(idx),
                gsi_cfg->gsi_base, gsi_cfg->gsi_end);
 
-       nr_ioapics++;
+       return 0;
+}
+
+int mp_unregister_ioapic(u32 gsi_base)
+{
+       int ioapic, pin;
+       int found = 0;
+       struct mp_pin_info *pin_info;
+
+       for_each_ioapic(ioapic)
+               if (ioapics[ioapic].gsi_config.gsi_base == gsi_base) {
+                       found = 1;
+                       break;
+               }
+       if (!found) {
+               pr_warn("can't find IOAPIC for GSI %d\n", gsi_base);
+               return -ENODEV;
+       }
+
+       for_each_pin(ioapic, pin) {
+               pin_info = mp_pin_info(ioapic, pin);
+               if (pin_info->count) {
+                       pr_warn("pin%d on IOAPIC%d is still in use.\n",
+                               pin, ioapic);
+                       return -EBUSY;
+               }
+       }
+
+       /* Mark entry not present */
+       ioapics[ioapic].nr_registers  = 0;
+       ioapic_destroy_irqdomain(ioapic);
+       free_ioapic_saved_registers(ioapic);
+       if (ioapics[ioapic].iomem_res)
+               release_resource(ioapics[ioapic].iomem_res);
+       clear_fixmap(FIX_IO_APIC_BASE_0 + ioapic);
+       memset(&ioapics[ioapic], 0, sizeof(ioapics[ioapic]));
+
+       return 0;
+}
+
+int mp_ioapic_registered(u32 gsi_base)
+{
+       int ioapic;
+
+       for_each_ioapic(ioapic)
+               if (ioapics[ioapic].gsi_config.gsi_base == gsi_base)
+                       return 1;
+
+       return 0;
+}
+
+static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
+                                       int ioapic, int ioapic_pin,
+                                       int trigger, int polarity)
+{
+       irq_attr->ioapic        = ioapic;
+       irq_attr->ioapic_pin    = ioapic_pin;
+       irq_attr->trigger       = trigger;
+       irq_attr->polarity      = polarity;
 }
 
 int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq,
@@ -3931,7 +3059,7 @@ void mp_irqdomain_unmap(struct irq_domain *domain, unsigned int virq)
 
        ioapic_mask_entry(ioapic, pin);
        __remove_pin_from_irq(cfg, ioapic, pin);
-       WARN_ON(cfg->irq_2_pin != NULL);
+       WARN_ON(!list_empty(&cfg->irq_2_pin));
        arch_teardown_hwirq(virq);
 }
 
@@ -3964,18 +3092,6 @@ int mp_set_gsi_attr(u32 gsi, int trigger, int polarity, int node)
        return ret;
 }
 
-bool mp_should_keep_irq(struct device *dev)
-{
-       if (dev->power.is_prepared)
-               return true;
-#ifdef CONFIG_PM
-       if (dev->power.runtime_status == RPM_SUSPENDING)
-               return true;
-#endif
-
-       return false;
-}
-
 /* Enable IOAPIC early just for system timer */
 void __init pre_init_apic_IRQ0(void)
 {
diff --git a/arch/x86/kernel/apic/msi.c b/arch/x86/kernel/apic/msi.c
new file mode 100644 (file)
index 0000000..d6ba2d6
--- /dev/null
@@ -0,0 +1,286 @@
+/*
+ * Support of MSI, HPET and DMAR interrupts.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
+ *     Moved from arch/x86/kernel/apic/io_apic.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/dmar.h>
+#include <linux/hpet.h>
+#include <linux/msi.h>
+#include <asm/msidef.h>
+#include <asm/hpet.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/irq_remapping.h>
+
+void native_compose_msi_msg(struct pci_dev *pdev,
+                           unsigned int irq, unsigned int dest,
+                           struct msi_msg *msg, u8 hpet_id)
+{
+       struct irq_cfg *cfg = irq_cfg(irq);
+
+       msg->address_hi = MSI_ADDR_BASE_HI;
+
+       if (x2apic_enabled())
+               msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);
+
+       msg->address_lo =
+               MSI_ADDR_BASE_LO |
+               ((apic->irq_dest_mode == 0) ?
+                       MSI_ADDR_DEST_MODE_PHYSICAL :
+                       MSI_ADDR_DEST_MODE_LOGICAL) |
+               ((apic->irq_delivery_mode != dest_LowestPrio) ?
+                       MSI_ADDR_REDIRECTION_CPU :
+                       MSI_ADDR_REDIRECTION_LOWPRI) |
+               MSI_ADDR_DEST_ID(dest);
+
+       msg->data =
+               MSI_DATA_TRIGGER_EDGE |
+               MSI_DATA_LEVEL_ASSERT |
+               ((apic->irq_delivery_mode != dest_LowestPrio) ?
+                       MSI_DATA_DELIVERY_FIXED :
+                       MSI_DATA_DELIVERY_LOWPRI) |
+               MSI_DATA_VECTOR(cfg->vector);
+}
+
+static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
+                          struct msi_msg *msg, u8 hpet_id)
+{
+       struct irq_cfg *cfg;
+       int err;
+       unsigned dest;
+
+       if (disable_apic)
+               return -ENXIO;
+
+       cfg = irq_cfg(irq);
+       err = assign_irq_vector(irq, cfg, apic->target_cpus());
+       if (err)
+               return err;
+
+       err = apic->cpu_mask_to_apicid_and(cfg->domain,
+                                          apic->target_cpus(), &dest);
+       if (err)
+               return err;
+
+       x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);
+
+       return 0;
+}
+
+static int
+msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
+{
+       struct irq_cfg *cfg = irqd_cfg(data);
+       struct msi_msg msg;
+       unsigned int dest;
+       int ret;
+
+       ret = apic_set_affinity(data, mask, &dest);
+       if (ret)
+               return ret;
+
+       __get_cached_msi_msg(data->msi_desc, &msg);
+
+       msg.data &= ~MSI_DATA_VECTOR_MASK;
+       msg.data |= MSI_DATA_VECTOR(cfg->vector);
+       msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+       msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+       __pci_write_msi_msg(data->msi_desc, &msg);
+
+       return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+/*
+ * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
+ * which implement the MSI or MSI-X Capability Structure.
+ */
+static struct irq_chip msi_chip = {
+       .name                   = "PCI-MSI",
+       .irq_unmask             = pci_msi_unmask_irq,
+       .irq_mask               = pci_msi_mask_irq,
+       .irq_ack                = apic_ack_edge,
+       .irq_set_affinity       = msi_set_affinity,
+       .irq_retrigger          = apic_retrigger_irq,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+};
+
+int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
+                 unsigned int irq_base, unsigned int irq_offset)
+{
+       struct irq_chip *chip = &msi_chip;
+       struct msi_msg msg;
+       unsigned int irq = irq_base + irq_offset;
+       int ret;
+
+       ret = msi_compose_msg(dev, irq, &msg, -1);
+       if (ret < 0)
+               return ret;
+
+       irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
+
+       /*
+        * MSI-X message is written per-IRQ, the offset is always 0.
+        * MSI message denotes a contiguous group of IRQs, written for 0th IRQ.
+        */
+       if (!irq_offset)
+               pci_write_msi_msg(irq, &msg);
+
+       setup_remapped_irq(irq, irq_cfg(irq), chip);
+
+       irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
+
+       dev_dbg(&dev->dev, "irq %d for MSI/MSI-X\n", irq);
+
+       return 0;
+}
+
+int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+       struct msi_desc *msidesc;
+       unsigned int irq;
+       int node, ret;
+
+       /* Multiple MSI vectors only supported with interrupt remapping */
+       if (type == PCI_CAP_ID_MSI && nvec > 1)
+               return 1;
+
+       node = dev_to_node(&dev->dev);
+
+       list_for_each_entry(msidesc, &dev->msi_list, list) {
+               irq = irq_alloc_hwirq(node);
+               if (!irq)
+                       return -ENOSPC;
+
+               ret = setup_msi_irq(dev, msidesc, irq, 0);
+               if (ret < 0) {
+                       irq_free_hwirq(irq);
+                       return ret;
+               }
+
+       }
+       return 0;
+}
+
+void native_teardown_msi_irq(unsigned int irq)
+{
+       irq_free_hwirq(irq);
+}
+
+#ifdef CONFIG_DMAR_TABLE
+static int
+dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
+                     bool force)
+{
+       struct irq_cfg *cfg = irqd_cfg(data);
+       unsigned int dest, irq = data->irq;
+       struct msi_msg msg;
+       int ret;
+
+       ret = apic_set_affinity(data, mask, &dest);
+       if (ret)
+               return ret;
+
+       dmar_msi_read(irq, &msg);
+
+       msg.data &= ~MSI_DATA_VECTOR_MASK;
+       msg.data |= MSI_DATA_VECTOR(cfg->vector);
+       msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+       msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+       msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
+
+       dmar_msi_write(irq, &msg);
+
+       return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static struct irq_chip dmar_msi_type = {
+       .name                   = "DMAR_MSI",
+       .irq_unmask             = dmar_msi_unmask,
+       .irq_mask               = dmar_msi_mask,
+       .irq_ack                = apic_ack_edge,
+       .irq_set_affinity       = dmar_msi_set_affinity,
+       .irq_retrigger          = apic_retrigger_irq,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+};
+
+int arch_setup_dmar_msi(unsigned int irq)
+{
+       int ret;
+       struct msi_msg msg;
+
+       ret = msi_compose_msg(NULL, irq, &msg, -1);
+       if (ret < 0)
+               return ret;
+       dmar_msi_write(irq, &msg);
+       irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
+                                     "edge");
+       return 0;
+}
+#endif
+
+/*
+ * MSI message composition
+ */
+#ifdef CONFIG_HPET_TIMER
+
+static int hpet_msi_set_affinity(struct irq_data *data,
+                                const struct cpumask *mask, bool force)
+{
+       struct irq_cfg *cfg = irqd_cfg(data);
+       struct msi_msg msg;
+       unsigned int dest;
+       int ret;
+
+       ret = apic_set_affinity(data, mask, &dest);
+       if (ret)
+               return ret;
+
+       hpet_msi_read(data->handler_data, &msg);
+
+       msg.data &= ~MSI_DATA_VECTOR_MASK;
+       msg.data |= MSI_DATA_VECTOR(cfg->vector);
+       msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
+       msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+
+       hpet_msi_write(data->handler_data, &msg);
+
+       return IRQ_SET_MASK_OK_NOCOPY;
+}
+
+static struct irq_chip hpet_msi_type = {
+       .name = "HPET_MSI",
+       .irq_unmask = hpet_msi_unmask,
+       .irq_mask = hpet_msi_mask,
+       .irq_ack = apic_ack_edge,
+       .irq_set_affinity = hpet_msi_set_affinity,
+       .irq_retrigger = apic_retrigger_irq,
+       .flags = IRQCHIP_SKIP_SET_WAKE,
+};
+
+int default_setup_hpet_msi(unsigned int irq, unsigned int id)
+{
+       struct irq_chip *chip = &hpet_msi_type;
+       struct msi_msg msg;
+       int ret;
+
+       ret = msi_compose_msg(NULL, irq, &msg, id);
+       if (ret < 0)
+               return ret;
+
+       hpet_msi_write(irq_get_handler_data(irq), &msg);
+       irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
+       setup_remapped_irq(irq, irq_cfg(irq), chip);
+
+       irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
+       return 0;
+}
+#endif
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
new file mode 100644 (file)
index 0000000..6cedd79
--- /dev/null
@@ -0,0 +1,719 @@
+/*
+ * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
+ *
+ * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
+ *     Moved from arch/x86/kernel/apic/io_apic.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/compiler.h>
+#include <linux/irqdomain.h>
+#include <linux/slab.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/i8259.h>
+#include <asm/desc.h>
+#include <asm/irq_remapping.h>
+
+static DEFINE_RAW_SPINLOCK(vector_lock);
+
+void lock_vector_lock(void)
+{
+       /* Used to ensure that the online set of cpus does not change
+        * during assign_irq_vector.
+        */
+       raw_spin_lock(&vector_lock);
+}
+
+void unlock_vector_lock(void)
+{
+       raw_spin_unlock(&vector_lock);
+}
+
+struct irq_cfg *irq_cfg(unsigned int irq)
+{
+       return irq_get_chip_data(irq);
+}
+
+struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
+{
+       return irq_data->chip_data;
+}
+
+static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
+{
+       struct irq_cfg *cfg;
+
+       cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
+       if (!cfg)
+               return NULL;
+       if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
+               goto out_cfg;
+       if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
+               goto out_domain;
+#ifdef CONFIG_X86_IO_APIC
+       INIT_LIST_HEAD(&cfg->irq_2_pin);
+#endif
+       return cfg;
+out_domain:
+       free_cpumask_var(cfg->domain);
+out_cfg:
+       kfree(cfg);
+       return NULL;
+}
+
+struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
+{
+       int res = irq_alloc_desc_at(at, node);
+       struct irq_cfg *cfg;
+
+       if (res < 0) {
+               if (res != -EEXIST)
+                       return NULL;
+               cfg = irq_cfg(at);
+               if (cfg)
+                       return cfg;
+       }
+
+       cfg = alloc_irq_cfg(at, node);
+       if (cfg)
+               irq_set_chip_data(at, cfg);
+       else
+               irq_free_desc(at);
+       return cfg;
+}
+
+static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
+{
+       if (!cfg)
+               return;
+       irq_set_chip_data(at, NULL);
+       free_cpumask_var(cfg->domain);
+       free_cpumask_var(cfg->old_domain);
+       kfree(cfg);
+}
+
+static int
+__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+{
+       /*
+        * NOTE! The local APIC isn't very good at handling
+        * multiple interrupts at the same interrupt level.
+        * As the interrupt level is determined by taking the
+        * vector number and shifting that right by 4, we
+        * want to spread these out a bit so that they don't
+        * all fall in the same interrupt level.
+        *
+        * Also, we've got to be careful not to trash gate
+        * 0x80, because int 0x80 is hm, kind of importantish. ;)
+        */
+       static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
+       static int current_offset = VECTOR_OFFSET_START % 16;
+       int cpu, err;
+       cpumask_var_t tmp_mask;
+
+       if (cfg->move_in_progress)
+               return -EBUSY;
+
+       if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
+               return -ENOMEM;
+
+       /* Only try and allocate irqs on cpus that are present */
+       err = -ENOSPC;
+       cpumask_clear(cfg->old_domain);
+       cpu = cpumask_first_and(mask, cpu_online_mask);
+       while (cpu < nr_cpu_ids) {
+               int new_cpu, vector, offset;
+
+               apic->vector_allocation_domain(cpu, tmp_mask, mask);
+
+               if (cpumask_subset(tmp_mask, cfg->domain)) {
+                       err = 0;
+                       if (cpumask_equal(tmp_mask, cfg->domain))
+                               break;
+                       /*
+                        * New cpumask using the vector is a proper subset of
+                        * the current in use mask. So cleanup the vector
+                        * allocation for the members that are not used anymore.
+                        */
+                       cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
+                       cfg->move_in_progress =
+                          cpumask_intersects(cfg->old_domain, cpu_online_mask);
+                       cpumask_and(cfg->domain, cfg->domain, tmp_mask);
+                       break;
+               }
+
+               vector = current_vector;
+               offset = current_offset;
+next:
+               vector += 16;
+               if (vector >= first_system_vector) {
+                       offset = (offset + 1) % 16;
+                       vector = FIRST_EXTERNAL_VECTOR + offset;
+               }
+
+               if (unlikely(current_vector == vector)) {
+                       cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
+                       cpumask_andnot(tmp_mask, mask, cfg->old_domain);
+                       cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
+                       continue;
+               }
+
+               if (test_bit(vector, used_vectors))
+                       goto next;
+
+               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
+                       if (per_cpu(vector_irq, new_cpu)[vector] >
+                           VECTOR_UNDEFINED)
+                               goto next;
+               }
+               /* Found one! */
+               current_vector = vector;
+               current_offset = offset;
+               if (cfg->vector) {
+                       cpumask_copy(cfg->old_domain, cfg->domain);
+                       cfg->move_in_progress =
+                          cpumask_intersects(cfg->old_domain, cpu_online_mask);
+               }
+               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
+                       per_cpu(vector_irq, new_cpu)[vector] = irq;
+               cfg->vector = vector;
+               cpumask_copy(cfg->domain, tmp_mask);
+               err = 0;
+               break;
+       }
+       free_cpumask_var(tmp_mask);
+
+       return err;
+}
+
+int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+{
+       int err;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&vector_lock, flags);
+       err = __assign_irq_vector(irq, cfg, mask);
+       raw_spin_unlock_irqrestore(&vector_lock, flags);
+       return err;
+}
+
+void clear_irq_vector(int irq, struct irq_cfg *cfg)
+{
+       int cpu, vector;
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&vector_lock, flags);
+       BUG_ON(!cfg->vector);
+
+       vector = cfg->vector;
+       for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
+               per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+
+       cfg->vector = 0;
+       cpumask_clear(cfg->domain);
+
+       if (likely(!cfg->move_in_progress)) {
+               raw_spin_unlock_irqrestore(&vector_lock, flags);
+               return;
+       }
+
+       for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
+               for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
+                    vector++) {
+                       if (per_cpu(vector_irq, cpu)[vector] != irq)
+                               continue;
+                       per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+                       break;
+               }
+       }
+       cfg->move_in_progress = 0;
+       raw_spin_unlock_irqrestore(&vector_lock, flags);
+}
+
+int __init arch_probe_nr_irqs(void)
+{
+       int nr;
+
+       if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
+               nr_irqs = NR_VECTORS * nr_cpu_ids;
+
+       nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
+#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
+       /*
+        * for MSI and HT dyn irq
+        */
+       if (gsi_top <= NR_IRQS_LEGACY)
+               nr +=  8 * nr_cpu_ids;
+       else
+               nr += gsi_top * 16;
+#endif
+       if (nr < nr_irqs)
+               nr_irqs = nr;
+
+       return nr_legacy_irqs();
+}
+
+int __init arch_early_irq_init(void)
+{
+       return arch_early_ioapic_init();
+}
+
+static void __setup_vector_irq(int cpu)
+{
+       /* Initialize vector_irq on a new cpu */
+       int irq, vector;
+       struct irq_cfg *cfg;
+
+       /*
+        * vector_lock will make sure that we don't run into irq vector
+        * assignments that might be happening on another cpu in parallel,
+        * while we setup our initial vector to irq mappings.
+        */
+       raw_spin_lock(&vector_lock);
+       /* Mark the inuse vectors */
+       for_each_active_irq(irq) {
+               cfg = irq_cfg(irq);
+               if (!cfg)
+                       continue;
+
+               if (!cpumask_test_cpu(cpu, cfg->domain))
+                       continue;
+               vector = cfg->vector;
+               per_cpu(vector_irq, cpu)[vector] = irq;
+       }
+       /* Mark the free vectors */
+       for (vector = 0; vector < NR_VECTORS; ++vector) {
+               irq = per_cpu(vector_irq, cpu)[vector];
+               if (irq <= VECTOR_UNDEFINED)
+                       continue;
+
+               cfg = irq_cfg(irq);
+               if (!cpumask_test_cpu(cpu, cfg->domain))
+                       per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
+       }
+       raw_spin_unlock(&vector_lock);
+}
+
+/*
+ * Setup the vector to irq mappings.
+ */
+void setup_vector_irq(int cpu)
+{
+       int irq;
+
+       /*
+        * On most of the platforms, legacy PIC delivers the interrupts on the
+        * boot cpu. But there are certain platforms where PIC interrupts are
+        * delivered to multiple cpu's. If the legacy IRQ is handled by the
+        * legacy PIC, for the new cpu that is coming online, setup the static
+        * legacy vector to irq mapping:
+        */
+       for (irq = 0; irq < nr_legacy_irqs(); irq++)
+               per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
+
+       __setup_vector_irq(cpu);
+}
+
+int apic_retrigger_irq(struct irq_data *data)
+{
+       struct irq_cfg *cfg = irqd_cfg(data);
+       unsigned long flags;
+       int cpu;
+
+       raw_spin_lock_irqsave(&vector_lock, flags);
+       cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
+       apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
+       raw_spin_unlock_irqrestore(&vector_lock, flags);
+
+       return 1;
+}
+
+void apic_ack_edge(struct irq_data *data)
+{
+       irq_complete_move(irqd_cfg(data));
+       irq_move_irq(data);
+       ack_APIC_irq();
+}
+
+/*
+ * Either sets data->affinity to a valid value, and returns
+ * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
+ * leaves data->affinity untouched.
+ */
+int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
+                     unsigned int *dest_id)
+{
+       struct irq_cfg *cfg = irqd_cfg(data);
+       unsigned int irq = data->irq;
+       int err;
+
+       if (!config_enabled(CONFIG_SMP))
+               return -EPERM;
+
+       if (!cpumask_intersects(mask, cpu_online_mask))
+               return -EINVAL;
+
+       err = assign_irq_vector(irq, cfg, mask);
+       if (err)
+               return err;
+
+       err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
+       if (err) {
+               if (assign_irq_vector(irq, cfg, data->affinity))
+                       pr_err("Failed to recover vector for irq %d\n", irq);
+               return err;
+       }
+
+       cpumask_copy(data->affinity, mask);
+
+       return 0;
+}
+
+#ifdef CONFIG_SMP
+void send_cleanup_vector(struct irq_cfg *cfg)
+{
+       cpumask_var_t cleanup_mask;
+
+       if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
+               unsigned int i;
+
+               for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+                       apic->send_IPI_mask(cpumask_of(i),
+                                           IRQ_MOVE_CLEANUP_VECTOR);
+       } else {
+               cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
+               apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
+               free_cpumask_var(cleanup_mask);
+       }
+       cfg->move_in_progress = 0;
+}
+
+asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
+{
+       unsigned vector, me;
+
+       ack_APIC_irq();
+       irq_enter();
+       exit_idle();
+
+       me = smp_processor_id();
+       for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
+               int irq;
+               unsigned int irr;
+               struct irq_desc *desc;
+               struct irq_cfg *cfg;
+
+               irq = __this_cpu_read(vector_irq[vector]);
+
+               if (irq <= VECTOR_UNDEFINED)
+                       continue;
+
+               desc = irq_to_desc(irq);
+               if (!desc)
+                       continue;
+
+               cfg = irq_cfg(irq);
+               if (!cfg)
+                       continue;
+
+               raw_spin_lock(&desc->lock);
+
+               /*
+                * Check if the irq migration is in progress. If so, we
+                * haven't received the cleanup request yet for this irq.
+                */
+               if (cfg->move_in_progress)
+                       goto unlock;
+
+               if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+                       goto unlock;
+
+               irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
+               /*
+                * Check if the vector that needs to be cleaned up is
+                * registered at the cpu's IRR. If so, then this is not
+                * the best time to clean it up. Lets clean it up in the
+                * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
+                * to myself.
+                */
+               if (irr  & (1 << (vector % 32))) {
+                       apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
+                       goto unlock;
+               }
+               __this_cpu_write(vector_irq[vector], VECTOR_UNDEFINED);
+unlock:
+               raw_spin_unlock(&desc->lock);
+       }
+
+       irq_exit();
+}
+
+static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
+{
+       unsigned me;
+
+       if (likely(!cfg->move_in_progress))
+               return;
+
+       me = smp_processor_id();
+
+       if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+               send_cleanup_vector(cfg);
+}
+
+void irq_complete_move(struct irq_cfg *cfg)
+{
+       __irq_complete_move(cfg, ~get_irq_regs()->orig_ax);
+}
+
+void irq_force_complete_move(int irq)
+{
+       struct irq_cfg *cfg = irq_cfg(irq);
+
+       if (!cfg)
+               return;
+
+       __irq_complete_move(cfg, cfg->vector);
+}
+#endif
+
+/*
+ * Dynamic irq allocation and deallocation. Should be replaced by irq domains!
+ */
+int arch_setup_hwirq(unsigned int irq, int node)
+{
+       struct irq_cfg *cfg;
+       unsigned long flags;
+       int ret;
+
+       cfg = alloc_irq_cfg(irq, node);
+       if (!cfg)
+               return -ENOMEM;
+
+       raw_spin_lock_irqsave(&vector_lock, flags);
+       ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
+       raw_spin_unlock_irqrestore(&vector_lock, flags);
+
+       if (!ret)
+               irq_set_chip_data(irq, cfg);
+       else
+               free_irq_cfg(irq, cfg);
+       return ret;
+}
+
+void arch_teardown_hwirq(unsigned int irq)
+{
+       struct irq_cfg *cfg = irq_cfg(irq);
+
+       free_remapped_irq(irq);
+       clear_irq_vector(irq, cfg);
+       free_irq_cfg(irq, cfg);
+}
+
+static void __init print_APIC_field(int base)
+{
+       int i;
+
+       printk(KERN_DEBUG);
+
+       for (i = 0; i < 8; i++)
+               pr_cont("%08x", apic_read(base + i*0x10));
+
+       pr_cont("\n");
+}
+
+static void __init print_local_APIC(void *dummy)
+{
+       unsigned int i, v, ver, maxlvt;
+       u64 icr;
+
+       pr_debug("printing local APIC contents on CPU#%d/%d:\n",
+                smp_processor_id(), hard_smp_processor_id());
+       v = apic_read(APIC_ID);
+       pr_info("... APIC ID:      %08x (%01x)\n", v, read_apic_id());
+       v = apic_read(APIC_LVR);
+       pr_info("... APIC VERSION: %08x\n", v);
+       ver = GET_APIC_VERSION(v);
+       maxlvt = lapic_get_maxlvt();
+
+       v = apic_read(APIC_TASKPRI);
+       pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
+
+       /* !82489DX */
+       if (APIC_INTEGRATED(ver)) {
+               if (!APIC_XAPIC(ver)) {
+                       v = apic_read(APIC_ARBPRI);
+                       pr_debug("... APIC ARBPRI: %08x (%02x)\n",
+                                v, v & APIC_ARBPRI_MASK);
+               }
+               v = apic_read(APIC_PROCPRI);
+               pr_debug("... APIC PROCPRI: %08x\n", v);
+       }
+
+       /*
+        * Remote read supported only in the 82489DX and local APIC for
+        * Pentium processors.
+        */
+       if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
+               v = apic_read(APIC_RRR);
+               pr_debug("... APIC RRR: %08x\n", v);
+       }
+
+       v = apic_read(APIC_LDR);
+       pr_debug("... APIC LDR: %08x\n", v);
+       if (!x2apic_enabled()) {
+               v = apic_read(APIC_DFR);
+               pr_debug("... APIC DFR: %08x\n", v);
+       }
+       v = apic_read(APIC_SPIV);
+       pr_debug("... APIC SPIV: %08x\n", v);
+
+       pr_debug("... APIC ISR field:\n");
+       print_APIC_field(APIC_ISR);
+       pr_debug("... APIC TMR field:\n");
+       print_APIC_field(APIC_TMR);
+       pr_debug("... APIC IRR field:\n");
+       print_APIC_field(APIC_IRR);
+
+       /* !82489DX */
+       if (APIC_INTEGRATED(ver)) {
+               /* Due to the Pentium erratum 3AP. */
+               if (maxlvt > 3)
+                       apic_write(APIC_ESR, 0);
+
+               v = apic_read(APIC_ESR);
+               pr_debug("... APIC ESR: %08x\n", v);
+       }
+
+       icr = apic_icr_read();
+       pr_debug("... APIC ICR: %08x\n", (u32)icr);
+       pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32));
+
+       v = apic_read(APIC_LVTT);
+       pr_debug("... APIC LVTT: %08x\n", v);
+
+       if (maxlvt > 3) {
+               /* PC is LVT#4. */
+               v = apic_read(APIC_LVTPC);
+               pr_debug("... APIC LVTPC: %08x\n", v);
+       }
+       v = apic_read(APIC_LVT0);
+       pr_debug("... APIC LVT0: %08x\n", v);
+       v = apic_read(APIC_LVT1);
+       pr_debug("... APIC LVT1: %08x\n", v);
+
+       if (maxlvt > 2) {
+               /* ERR is LVT#3. */
+               v = apic_read(APIC_LVTERR);
+               pr_debug("... APIC LVTERR: %08x\n", v);
+       }
+
+       v = apic_read(APIC_TMICT);
+       pr_debug("... APIC TMICT: %08x\n", v);
+       v = apic_read(APIC_TMCCT);
+       pr_debug("... APIC TMCCT: %08x\n", v);
+       v = apic_read(APIC_TDCR);
+       pr_debug("... APIC TDCR: %08x\n", v);
+
+       if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
+               v = apic_read(APIC_EFEAT);
+               maxlvt = (v >> 16) & 0xff;
+               pr_debug("... APIC EFEAT: %08x\n", v);
+               v = apic_read(APIC_ECTRL);
+               pr_debug("... APIC ECTRL: %08x\n", v);
+               for (i = 0; i < maxlvt; i++) {
+                       v = apic_read(APIC_EILVTn(i));
+                       pr_debug("... APIC EILVT%d: %08x\n", i, v);
+               }
+       }
+       pr_cont("\n");
+}
+
+static void __init print_local_APICs(int maxcpu)
+{
+       int cpu;
+
+       if (!maxcpu)
+               return;
+
+       preempt_disable();
+       for_each_online_cpu(cpu) {
+               if (cpu >= maxcpu)
+                       break;
+               smp_call_function_single(cpu, print_local_APIC, NULL, 1);
+       }
+       preempt_enable();
+}
+
+static void __init print_PIC(void)
+{
+       unsigned int v;
+       unsigned long flags;
+
+       if (!nr_legacy_irqs())
+               return;
+
+       pr_debug("\nprinting PIC contents\n");
+
+       raw_spin_lock_irqsave(&i8259A_lock, flags);
+
+       v = inb(0xa1) << 8 | inb(0x21);
+       pr_debug("... PIC  IMR: %04x\n", v);
+
+       v = inb(0xa0) << 8 | inb(0x20);
+       pr_debug("... PIC  IRR: %04x\n", v);
+
+       outb(0x0b, 0xa0);
+       outb(0x0b, 0x20);
+       v = inb(0xa0) << 8 | inb(0x20);
+       outb(0x0a, 0xa0);
+       outb(0x0a, 0x20);
+
+       raw_spin_unlock_irqrestore(&i8259A_lock, flags);
+
+       pr_debug("... PIC  ISR: %04x\n", v);
+
+       v = inb(0x4d1) << 8 | inb(0x4d0);
+       pr_debug("... PIC ELCR: %04x\n", v);
+}
+
+static int show_lapic __initdata = 1;
+static __init int setup_show_lapic(char *arg)
+{
+       int num = -1;
+
+       if (strcmp(arg, "all") == 0) {
+               show_lapic = CONFIG_NR_CPUS;
+       } else {
+               get_option(&arg, &num);
+               if (num >= 0)
+                       show_lapic = num;
+       }
+
+       return 1;
+}
+__setup("show_lapic=", setup_show_lapic);
+
+static int __init print_ICs(void)
+{
+       if (apic_verbosity == APIC_QUIET)
+               return 0;
+
+       print_PIC();
+
+       /* don't print out if apic is not there */
+       if (!cpu_has_apic && !apic_from_smp_config())
+               return 0;
+
+       print_local_APICs(show_lapic);
+       print_IO_APICs();
+
+       return 0;
+}
+
+late_initcall(print_ICs);
index 08f3fed2b0f2bfe27997088f683b9613ddf30fe9..10b8d3eaaf15d760468a6ad88105ab7e06cd540b 100644 (file)
@@ -276,6 +276,17 @@ static struct intel_uncore_box *uncore_alloc_box(struct intel_uncore_type *type,
        return box;
 }
 
+/*
+ * Using uncore_pmu_event_init pmu event_init callback
+ * as a detection point for uncore events.
+ */
+static int uncore_pmu_event_init(struct perf_event *event);
+
+static bool is_uncore_event(struct perf_event *event)
+{
+       return event->pmu->event_init == uncore_pmu_event_init;
+}
+
 static int
 uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, bool dogrp)
 {
@@ -290,13 +301,18 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, b
                return -EINVAL;
 
        n = box->n_events;
-       box->event_list[n] = leader;
-       n++;
+
+       if (is_uncore_event(leader)) {
+               box->event_list[n] = leader;
+               n++;
+       }
+
        if (!dogrp)
                return n;
 
        list_for_each_entry(event, &leader->sibling_list, group_entry) {
-               if (event->state <= PERF_EVENT_STATE_OFF)
+               if (!is_uncore_event(event) ||
+                   event->state <= PERF_EVENT_STATE_OFF)
                        continue;
 
                if (n >= max_count)
index f5ab56d1428718f6327c91012d5373cc200438a2..aceb2f90c7166afcfa844cd7da58bfe6358efaa6 100644 (file)
@@ -28,6 +28,7 @@
 #include <asm/nmi.h>
 #include <asm/hw_irq.h>
 #include <asm/apic.h>
+#include <asm/io_apic.h>
 #include <asm/hpet.h>
 #include <linux/kdebug.h>
 #include <asm/cpu.h>
index 1cf7c97ff175e958979068fd7114996b9e45af0e..000d4199b03e69905527d7972ccd918835e6cc1f 100644 (file)
@@ -732,10 +732,10 @@ ENTRY(interrupt)
 ENTRY(irq_entries_start)
        RING0_INT_FRAME
 vector=FIRST_EXTERNAL_VECTOR
-.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
+.rept (FIRST_SYSTEM_VECTOR-FIRST_EXTERNAL_VECTOR+6)/7
        .balign 32
   .rept        7
-    .if vector < NR_VECTORS
+    .if vector < FIRST_SYSTEM_VECTOR
       .if vector <> FIRST_EXTERNAL_VECTOR
        CFI_ADJUST_CFA_OFFSET -4
       .endif
index 90878aa38dbd7fac20bb42386248f1d860f26ff1..9ebaf63ba18212559728664d4c69385e7321d4f2 100644 (file)
@@ -740,10 +740,10 @@ ENTRY(interrupt)
 ENTRY(irq_entries_start)
        INTR_FRAME
 vector=FIRST_EXTERNAL_VECTOR
-.rept (NR_VECTORS-FIRST_EXTERNAL_VECTOR+6)/7
+.rept (FIRST_SYSTEM_VECTOR-FIRST_EXTERNAL_VECTOR+6)/7
        .balign 32
   .rept        7
-    .if vector < NR_VECTORS
+    .if vector < FIRST_SYSTEM_VECTOR
       .if vector <> FIRST_EXTERNAL_VECTOR
        CFI_ADJUST_CFA_OFFSET -8
       .endif
index 4de73ee78361311493163414dc7439b7bbea75e5..70e181ea1eac1f2da444482e6714e61b52d5a19e 100644 (file)
@@ -99,32 +99,9 @@ void __init init_IRQ(void)
        x86_init.irqs.intr_init();
 }
 
-/*
- * Setup the vector to irq mappings.
- */
-void setup_vector_irq(int cpu)
-{
-#ifndef CONFIG_X86_IO_APIC
-       int irq;
-
-       /*
-        * On most of the platforms, legacy PIC delivers the interrupts on the
-        * boot cpu. But there are certain platforms where PIC interrupts are
-        * delivered to multiple cpu's. If the legacy IRQ is handled by the
-        * legacy PIC, for the new cpu that is coming online, setup the static
-        * legacy vector to irq mapping:
-        */
-       for (irq = 0; irq < nr_legacy_irqs(); irq++)
-               per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
-#endif
-
-       __setup_vector_irq(cpu);
-}
-
 static void __init smp_intr_init(void)
 {
 #ifdef CONFIG_SMP
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
        /*
         * The reschedule interrupt is a CPU-to-CPU reschedule-helper
         * IPI, driven by wakeup.
@@ -144,7 +121,6 @@ static void __init smp_intr_init(void)
 
        /* IPI used for rebooting/stopping */
        alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);
-#endif
 #endif /* CONFIG_SMP */
 }
 
@@ -159,7 +135,7 @@ static void __init apic_intr_init(void)
        alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
 #endif
 
-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
+#ifdef CONFIG_X86_LOCAL_APIC
        /* self generated IPI for local APIC timer */
        alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
 
@@ -197,10 +173,17 @@ void __init native_init_IRQ(void)
         * 'special' SMP interrupts)
         */
        i = FIRST_EXTERNAL_VECTOR;
-       for_each_clear_bit_from(i, used_vectors, NR_VECTORS) {
+#ifndef CONFIG_X86_LOCAL_APIC
+#define first_system_vector NR_VECTORS
+#endif
+       for_each_clear_bit_from(i, used_vectors, first_system_vector) {
                /* IA32_SYSCALL_VECTOR could be used in trap_init already. */
                set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
        }
+#ifdef CONFIG_X86_LOCAL_APIC
+       for_each_clear_bit_from(i, used_vectors, NR_VECTORS)
+               set_intr_gate(i, spurious_interrupt);
+#endif
 
        if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs())
                setup_irq(2, &irq2);
index 72e8e310258d610c8a05f89920ddf101deb92a05..469b23d6acc272b2113878182582d9fa7532f189 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
 #include <asm/apic.h>
+#include <asm/io_apic.h>
 #include <asm/cpufeature.h>
 #include <asm/desc.h>
 #include <asm/cacheflush.h>
index 485981059a40e703e7be8debd49a14e45dde0e54..415480d3ea848bcf95e7ae26b56a5261add1cc8c 100644 (file)
@@ -22,6 +22,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/mmu_context.h>
+#include <asm/io_apic.h>
 #include <asm/debugreg.h>
 #include <asm/kexec-bzimage64.h>
 
index 17962e667a91c64f7e99e62a04d83b253823691d..bae6c609888e7fdff25784d5bd96fd8dcd5ea88a 100644 (file)
@@ -12,6 +12,7 @@
 #include <acpi/reboot.h>
 #include <asm/io.h>
 #include <asm/apic.h>
+#include <asm/io_apic.h>
 #include <asm/desc.h>
 #include <asm/hpet.h>
 #include <asm/pgtable.h>
index 7a8f5845e8eb9140e849b52cabf852a82313e979..6d7022c683e31555967f20edfc18b490576bc10b 100644 (file)
@@ -1084,7 +1084,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
 {
        unsigned int i;
 
-       preempt_disable();
        smp_cpu_index_default();
 
        /*
@@ -1102,22 +1101,19 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
        }
        set_cpu_sibling_map(0);
 
-
        if (smp_sanity_check(max_cpus) < 0) {
                pr_info("SMP disabled\n");
                disable_smp();
-               goto out;
+               return;
        }
 
        default_setup_apic_routing();
 
-       preempt_disable();
        if (read_apic_id() != boot_cpu_physical_apicid) {
                panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
                     read_apic_id(), boot_cpu_physical_apicid);
                /* Or can we switch back to PIC here? */
        }
-       preempt_enable();
 
        connect_bsp_APIC();
 
@@ -1151,8 +1147,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
                uv_system_init();
 
        set_mtrr_aps_delayed_init();
-out:
-       preempt_enable();
 }
 
 void arch_enable_nonboot_cpus_begin(void)
index 3e551eee87b90e2e14785eadf93646ede67cf9a3..4e942f31b1a7c9401a65fb37af093caab5ad0c2e 100644 (file)
@@ -55,12 +55,6 @@ static bool tls_desc_okay(const struct user_desc *info)
        if (info->seg_not_present)
                return false;
 
-#ifdef CONFIG_X86_64
-       /* The L bit makes no sense for data. */
-       if (info->lm)
-               return false;
-#endif
-
        return true;
 }
 
index a9ae205798954d86510ceead4287c782968685ad..88900e288021f23a2f22aebf739e25070f456971 100644 (file)
@@ -331,7 +331,7 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
                break; /* Success, it was handled */
        case 1: /* Bound violation. */
                info = mpx_generate_siginfo(regs, xsave_buf);
-               if (PTR_ERR(info)) {
+               if (IS_ERR(info)) {
                        /*
                         * We failed to decode the MPX instruction.  Act as if
                         * the exception was not caused by MPX.
index 10fbed126b1121ae5fde2f7ccfbf04133b5f8771..f83fc6c5e0bad6712e9f172acaba931c47cd32cd 100644 (file)
@@ -4448,7 +4448,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
         * zap all shadow pages.
         */
        if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
-               printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
+               printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
                kvm_mmu_invalidate_zap_all_pages(kvm);
        }
 }
index feb852b04598b63d187b0870db26d008c81a13d8..d4c58d884838d1539cb52e13df3c6893459a98af 100644 (file)
@@ -5840,53 +5840,10 @@ static __init int hardware_setup(void)
        memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
        memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
 
-       vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
-       vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
-       vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
-       vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
-       vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
-       vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
-       vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
-
-       memcpy(vmx_msr_bitmap_legacy_x2apic,
-                       vmx_msr_bitmap_legacy, PAGE_SIZE);
-       memcpy(vmx_msr_bitmap_longmode_x2apic,
-                       vmx_msr_bitmap_longmode, PAGE_SIZE);
-
-       if (enable_apicv) {
-               for (msr = 0x800; msr <= 0x8ff; msr++)
-                       vmx_disable_intercept_msr_read_x2apic(msr);
-
-               /* According SDM, in x2apic mode, the whole id reg is used.
-                * But in KVM, it only use the highest eight bits. Need to
-                * intercept it */
-               vmx_enable_intercept_msr_read_x2apic(0x802);
-               /* TMCCT */
-               vmx_enable_intercept_msr_read_x2apic(0x839);
-               /* TPR */
-               vmx_disable_intercept_msr_write_x2apic(0x808);
-               /* EOI */
-               vmx_disable_intercept_msr_write_x2apic(0x80b);
-               /* SELF-IPI */
-               vmx_disable_intercept_msr_write_x2apic(0x83f);
-       }
-
-       if (enable_ept) {
-               kvm_mmu_set_mask_ptes(0ull,
-                       (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
-                       (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
-                       0ull, VMX_EPT_EXECUTABLE_MASK);
-               ept_set_mmio_spte_mask();
-               kvm_enable_tdp();
-       } else
-               kvm_disable_tdp();
-
-       update_ple_window_actual_max();
-
        if (setup_vmcs_config(&vmcs_config) < 0) {
                r = -EIO;
                goto out7;
-    }
+       }
 
        if (boot_cpu_has(X86_FEATURE_NX))
                kvm_enable_efer_bits(EFER_NX);
@@ -5945,6 +5902,49 @@ static __init int hardware_setup(void)
        if (nested)
                nested_vmx_setup_ctls_msrs();
 
+       vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
+       vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
+       vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
+       vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
+       vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
+       vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
+       vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
+
+       memcpy(vmx_msr_bitmap_legacy_x2apic,
+                       vmx_msr_bitmap_legacy, PAGE_SIZE);
+       memcpy(vmx_msr_bitmap_longmode_x2apic,
+                       vmx_msr_bitmap_longmode, PAGE_SIZE);
+
+       if (enable_apicv) {
+               for (msr = 0x800; msr <= 0x8ff; msr++)
+                       vmx_disable_intercept_msr_read_x2apic(msr);
+
+               /* According SDM, in x2apic mode, the whole id reg is used.
+                * But in KVM, it only use the highest eight bits. Need to
+                * intercept it */
+               vmx_enable_intercept_msr_read_x2apic(0x802);
+               /* TMCCT */
+               vmx_enable_intercept_msr_read_x2apic(0x839);
+               /* TPR */
+               vmx_disable_intercept_msr_write_x2apic(0x808);
+               /* EOI */
+               vmx_disable_intercept_msr_write_x2apic(0x80b);
+               /* SELF-IPI */
+               vmx_disable_intercept_msr_write_x2apic(0x83f);
+       }
+
+       if (enable_ept) {
+               kvm_mmu_set_mask_ptes(0ull,
+                       (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
+                       (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
+                       0ull, VMX_EPT_EXECUTABLE_MASK);
+               ept_set_mmio_spte_mask();
+               kvm_enable_tdp();
+       } else
+               kvm_disable_tdp();
+
+       update_ple_window_actual_max();
+
        return alloc_kvm_area();
 
 out7:
index aae94132bc24dd42b548d3dc18214b668195c06f..c1c1544b84859e9675ad71ac85cab1568994b623 100644 (file)
@@ -841,7 +841,7 @@ static void __init lguest_init_IRQ(void)
 {
        unsigned int i;
 
-       for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
+       for (i = FIRST_EXTERNAL_VECTOR; i < FIRST_SYSTEM_VECTOR; i++) {
                /* Some systems map "vectors" to interrupts weirdly.  Not us! */
                __this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
                if (i != SYSCALL_VECTOR)
index 207d9aef662def24bfef094ff587f56f33afa595..d7547824e76369d6e14ed6ddd7ee39a55ef94a2e 100644 (file)
@@ -15,7 +15,7 @@
 static inline pte_t gup_get_pte(pte_t *ptep)
 {
 #ifndef CONFIG_X86_PAE
-       return ACCESS_ONCE(*ptep);
+       return READ_ONCE(*ptep);
 #else
        /*
         * With get_user_pages_fast, we walk down the pagetables without taking
index b9958c364075e949620001cc9d8f02a2715d0128..44b9271580b5b0532bddf121af554cc0ec951779 100644 (file)
@@ -210,6 +210,9 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
 {
        int polarity;
 
+       if (dev->irq_managed && dev->irq > 0)
+               return 0;
+
        if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER)
                polarity = 0; /* active high */
        else
@@ -224,13 +227,18 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
        if (mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC) < 0)
                return -EBUSY;
 
+       dev->irq_managed = 1;
+
        return 0;
 }
 
 static void intel_mid_pci_irq_disable(struct pci_dev *dev)
 {
-       if (!mp_should_keep_irq(&dev->dev) && dev->irq > 0)
+       if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
+           dev->irq > 0) {
                mp_unmap_irq(dev->irq);
+               dev->irq_managed = 0;
+       }
 }
 
 struct pci_ops intel_mid_pci_ops = {
index eb500c2592ad8ab4ced728f676d9102411fa3782..5dc6ca5e174131d2c7208ea1ed86739ef4532d22 100644 (file)
@@ -1200,11 +1200,12 @@ static int pirq_enable_irq(struct pci_dev *dev)
 #ifdef CONFIG_X86_IO_APIC
                        struct pci_dev *temp_dev;
                        int irq;
-                       struct io_apic_irq_attr irq_attr;
+
+                       if (dev->irq_managed && dev->irq > 0)
+                               return 0;
 
                        irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
-                                               PCI_SLOT(dev->devfn),
-                                               pin - 1, &irq_attr);
+                                               PCI_SLOT(dev->devfn), pin - 1);
                        /*
                         * Busses behind bridges are typically not listed in the MP-table.
                         * In this case we have to look up the IRQ based on the parent bus,
@@ -1218,7 +1219,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
                                pin = pci_swizzle_interrupt_pin(dev, pin);
                                irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
                                                PCI_SLOT(bridge->devfn),
-                                               pin - 1, &irq_attr);
+                                               pin - 1);
                                if (irq >= 0)
                                        dev_warn(&dev->dev, "using bridge %s "
                                                 "INT %c to get IRQ %d\n",
@@ -1228,6 +1229,7 @@ static int pirq_enable_irq(struct pci_dev *dev)
                        }
                        dev = temp_dev;
                        if (irq >= 0) {
+                               dev->irq_managed = 1;
                                dev->irq = irq;
                                dev_info(&dev->dev, "PCI->APIC IRQ transform: "
                                         "INT %c -> IRQ %d\n", 'A' + pin - 1, irq);
@@ -1254,11 +1256,24 @@ static int pirq_enable_irq(struct pci_dev *dev)
        return 0;
 }
 
+bool mp_should_keep_irq(struct device *dev)
+{
+       if (dev->power.is_prepared)
+               return true;
+#ifdef CONFIG_PM
+       if (dev->power.runtime_status == RPM_SUSPENDING)
+               return true;
+#endif
+
+       return false;
+}
+
 static void pirq_disable_irq(struct pci_dev *dev)
 {
        if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
-           dev->irq) {
+           dev->irq_managed && dev->irq) {
                mp_unmap_irq(dev->irq);
                dev->irq = 0;
+               dev->irq_managed = 0;
        }
 }
index b233681af4de6ecaaad9305546496b3ea83334b8..0ce67364543242a21fd3d6e2b40f8864623ce668 100644 (file)
@@ -131,7 +131,7 @@ arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
                       unsigned long mmr_offset, int limit)
 {
        const struct cpumask *eligible_cpu = cpumask_of(cpu);
-       struct irq_cfg *cfg = irq_get_chip_data(irq);
+       struct irq_cfg *cfg = irq_cfg(irq);
        unsigned long mmr_value;
        struct uv_IO_APIC_route_entry *entry;
        int mmr_pnode, err;
@@ -198,13 +198,13 @@ static int
 uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
                    bool force)
 {
-       struct irq_cfg *cfg = data->chip_data;
+       struct irq_cfg *cfg = irqd_cfg(data);
        unsigned int dest;
        unsigned long mmr_value, mmr_offset;
        struct uv_IO_APIC_route_entry *entry;
        int mmr_pnode;
 
-       if (__ioapic_set_affinity(data, mask, &dest))
+       if (apic_set_affinity(data, mask, &dest))
                return -1;
 
        mmr_value = 0;
index 7cc4e33179f9f8f3fb2e6c5741303ec0cc8c251e..5277a0ee57042b26cf78e6186a17899233801273 100644 (file)
@@ -413,6 +413,9 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
                return 0;
        }
 
+       if (dev->irq_managed && dev->irq > 0)
+               return 0;
+
        entry = acpi_pci_irq_lookup(dev, pin);
        if (!entry) {
                /*
@@ -456,6 +459,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
                return rc;
        }
        dev->irq = rc;
+       dev->irq_managed = 1;
 
        if (link)
                snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -478,7 +482,7 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
        u8 pin;
 
        pin = dev->pin;
-       if (!pin)
+       if (!pin || !dev->irq_managed || dev->irq <= 0)
                return;
 
        /* Keep IOAPIC pin configuration when suspending */
@@ -506,6 +510,9 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
         */
 
        dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
-       if (gsi >= 0 && dev->irq > 0)
+       if (gsi >= 0) {
                acpi_unregister_gsi(gsi);
+               dev->irq = 0;
+               dev->irq_managed = 0;
+       }
 }
index ef58f46c844287e4f64ff44a16ff63b1dd884537..342942f90a1031a3650306144d6858bb58a79b08 100644 (file)
@@ -125,13 +125,12 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
        }
 
        header = (struct acpi_subtable_header *)obj->buffer.pointer;
-       if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
+       if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
                map_lapic_id(header, acpi_id, &apic_id);
-       } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
+       else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
                map_lsapic_id(header, type, acpi_id, &apic_id);
-       } else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
+       else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
                map_x2apic_id(header, type, acpi_id, &apic_id);
-       }
 
 exit:
        kfree(buffer.pointer);
@@ -164,7 +163,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
                 * For example,
                 *
                 * Scope (_PR)
-                 * {
+                * {
                 *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
                 *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
                 *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
index 4995365046984e55855373484b47936b2569447d..87b704e41877daa488eec962f545f32cf0d079fe 100644 (file)
@@ -985,8 +985,6 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
                state->flags = 0;
                switch (cx->type) {
                        case ACPI_STATE_C1:
-                       if (cx->entry_method != ACPI_CSTATE_FFH)
-                               state->flags |= CPUIDLE_FLAG_TIME_INVALID;
 
                        state->enter = acpi_idle_enter_c1;
                        state->enter_dead = acpi_idle_play_dead;
index 2ba8f02ced3637e0b1431bb16049b2f238069ee2..782a0d15c25fa7b89900a06a3bb8eb194ddcd032 100644 (file)
@@ -200,7 +200,7 @@ bool acpi_dev_resource_address_space(struct acpi_resource *ares,
 
        status = acpi_resource_to_address64(ares, &addr);
        if (ACPI_FAILURE(status))
-               return true;
+               return false;
 
        res->start = addr.minimum;
        res->end = addr.maximum;
index 1eaadff2e198037ac9b8567517e8f37e37f09d4a..c72e79d2c5ad2559bce9f45ec9d26e99c04c8c2d 100644 (file)
@@ -505,6 +505,23 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
                },
        },
+
+       {
+        .callback = video_disable_native_backlight,
+        .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"),
+               },
+       },
+       {
+        .callback = video_disable_native_backlight,
+        .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
+               },
+       },
        {}
 };
 
index 6a103a35ea9b375f9328573c2fdece516690c2f1..0d8780c04a5e4d7c409b2ad1aa2d7e4c0c4f424b 100644 (file)
@@ -2088,7 +2088,7 @@ EXPORT_SYMBOL_GPL(of_genpd_del_provider);
  * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
  * on failure.
  */
-static struct generic_pm_domain *of_genpd_get_from_provider(
+struct generic_pm_domain *of_genpd_get_from_provider(
                                        struct of_phandle_args *genpdspec)
 {
        struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
@@ -2108,6 +2108,7 @@ static struct generic_pm_domain *of_genpd_get_from_provider(
 
        return genpd;
 }
+EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
 
 /**
  * genpd_dev_pm_detach - Detach a device from its PM domain.
index d24dd614a0bd3208c0d88fd79c35397db5be8a71..106c69359306b595e74075853ae8cc4e1465924f 100644 (file)
@@ -108,6 +108,14 @@ static LIST_HEAD(dev_opp_list);
 /* Lock to allow exclusive modification to the device and opp lists */
 static DEFINE_MUTEX(dev_opp_list_lock);
 
+#define opp_rcu_lockdep_assert()                                       \
+do {                                                                   \
+       rcu_lockdep_assert(rcu_read_lock_held() ||                      \
+                               lockdep_is_held(&dev_opp_list_lock),    \
+                          "Missing rcu_read_lock() or "                \
+                          "dev_opp_list_lock protection");             \
+} while (0)
+
 /**
  * find_device_opp() - find device_opp struct using device pointer
  * @dev:       device pointer used to lookup device OPPs
@@ -208,9 +216,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
  * This function returns the number of available opps if there are any,
  * else returns 0 if none or the corresponding error value.
  *
- * Locking: This function must be called under rcu_read_lock(). This function
- * internally references two RCU protected structures: device_opp and opp which
- * are safe as long as we are under a common RCU locked section.
+ * Locking: This function takes rcu_read_lock().
  */
 int dev_pm_opp_get_opp_count(struct device *dev)
 {
@@ -218,11 +224,14 @@ int dev_pm_opp_get_opp_count(struct device *dev)
        struct dev_pm_opp *temp_opp;
        int count = 0;
 
+       rcu_read_lock();
+
        dev_opp = find_device_opp(dev);
        if (IS_ERR(dev_opp)) {
-               int r = PTR_ERR(dev_opp);
-               dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
-               return r;
+               count = PTR_ERR(dev_opp);
+               dev_err(dev, "%s: device OPP not found (%d)\n",
+                       __func__, count);
+               goto out_unlock;
        }
 
        list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
@@ -230,6 +239,8 @@ int dev_pm_opp_get_opp_count(struct device *dev)
                        count++;
        }
 
+out_unlock:
+       rcu_read_unlock();
        return count;
 }
 EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
@@ -267,6 +278,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
        struct device_opp *dev_opp;
        struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
+       opp_rcu_lockdep_assert();
+
        dev_opp = find_device_opp(dev);
        if (IS_ERR(dev_opp)) {
                int r = PTR_ERR(dev_opp);
@@ -313,6 +326,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
        struct device_opp *dev_opp;
        struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
+       opp_rcu_lockdep_assert();
+
        if (!dev || !freq) {
                dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
                return ERR_PTR(-EINVAL);
@@ -361,6 +376,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
        struct device_opp *dev_opp;
        struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
 
+       opp_rcu_lockdep_assert();
+
        if (!dev || !freq) {
                dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
                return ERR_PTR(-EINVAL);
@@ -783,9 +800,15 @@ void of_free_opp_table(struct device *dev)
 
        /* Check for existing list for 'dev' */
        dev_opp = find_device_opp(dev);
-       if (WARN(IS_ERR(dev_opp), "%s: dev_opp: %ld\n", dev_name(dev),
-                PTR_ERR(dev_opp)))
+       if (IS_ERR(dev_opp)) {
+               int error = PTR_ERR(dev_opp);
+               if (error != -ENODEV)
+                       WARN(1, "%s: dev_opp: %d\n",
+                            IS_ERR_OR_NULL(dev) ?
+                                       "Invalid device" : dev_name(dev),
+                            error);
                return;
+       }
 
        /* Hold our list modification lock here */
        mutex_lock(&dev_opp_list_lock);
index 19db036676505519fb19538299afe2f5c84dd42a..dcbbb4ea3cc1d7040a847799fad2415e44a24e08 100644 (file)
@@ -417,6 +417,6 @@ static void __exit agp_ali_cleanup(void)
 module_init(agp_ali_init);
 module_exit(agp_ali_cleanup);
 
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
 MODULE_LICENSE("GPL and additional rights");
 
index 3b47ed0310e1fcbb73eda184cb810c1b3ea2889b..0ef350010766355c933ebe7e48ce41a78656b0ac 100644 (file)
@@ -813,6 +813,6 @@ static void __exit agp_amd64_cleanup(void)
 module_init(agp_amd64_mod_init);
 module_exit(agp_amd64_cleanup);
 
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
+MODULE_AUTHOR("Dave Jones, Andi Kleen");
 module_param(agp_try_unsupported, bool, 0);
 MODULE_LICENSE("GPL");
index 18a7a6baa304eadfc3968a640f97ac3da590b106..75a9786a77e6307711a74bcde8f4dd7fee8fcae0 100644 (file)
@@ -579,6 +579,6 @@ static void __exit agp_ati_cleanup(void)
 module_init(agp_ati_init);
 module_exit(agp_ati_cleanup);
 
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
 MODULE_LICENSE("GPL and additional rights");
 
index 317c28ce8328bb310c892c8bab4aca60cbbbc4f5..38ffb281df97c71eee3dc8980a98e19df3ca24d3 100644 (file)
@@ -356,7 +356,7 @@ static __init int agp_setup(char *s)
 __setup("agp=", agp_setup);
 #endif
 
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Jeff Hartmann");
 MODULE_DESCRIPTION("AGP GART driver");
 MODULE_LICENSE("GPL and additional rights");
 MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
index f9b9ca5d31b7946c3b1407ef692db1c2c149c628..0a21daed5b6251edd8136bc014d073ab734a2a0a 100644 (file)
@@ -920,5 +920,5 @@ static void __exit agp_intel_cleanup(void)
 module_init(agp_intel_init);
 module_exit(agp_intel_cleanup);
 
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Various @Intel");
 MODULE_LICENSE("GPL and additional rights");
index f3334829e55a3e6ac393a348c6da0e7aef159709..92aa43fa8d70205e621cc3f3fe03e8bbcd94a42f 100644 (file)
@@ -1438,5 +1438,5 @@ void intel_gmch_remove(void)
 }
 EXPORT_SYMBOL(intel_gmch_remove);
 
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Various @Intel");
 MODULE_LICENSE("GPL and additional rights");
index a1861b75eb31a9fe433e32105ef623092741052e..6c8d39cb566e32ea4ac14c818f3bd16962823097 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * Nvidia AGPGART routines.
  * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
- * to work in 2.5 by Dave Jones <davej@redhat.com>
+ * to work in 2.5 by Dave Jones.
  */
 
 #include <linux/module.h>
index 228f20cddc056244711805b20c44d168a4d94493..a4961d35e94046de34896c492e032b9c5e72c1ae 100644 (file)
@@ -595,4 +595,4 @@ module_init(agp_via_init);
 module_exit(agp_via_cleanup);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
index 5fa83f751378fb56ecc7da11a16d2c8386fb6862..6b65fa4e0c5586895df2b26ee499c9e5ad4d8b2c 100644 (file)
@@ -199,18 +199,6 @@ struct bmc_device {
        int                    guid_set;
        char                   name[16];
        struct kref            usecount;
-
-       /* bmc device attributes */
-       struct device_attribute device_id_attr;
-       struct device_attribute provides_dev_sdrs_attr;
-       struct device_attribute revision_attr;
-       struct device_attribute firmware_rev_attr;
-       struct device_attribute version_attr;
-       struct device_attribute add_dev_support_attr;
-       struct device_attribute manufacturer_id_attr;
-       struct device_attribute product_id_attr;
-       struct device_attribute guid_attr;
-       struct device_attribute aux_firmware_rev_attr;
 };
 #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
 
@@ -2252,7 +2240,7 @@ static ssize_t device_id_show(struct device *dev,
 
        return snprintf(buf, 10, "%u\n", bmc->id.device_id);
 }
-DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
+static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
 
 static ssize_t provides_device_sdrs_show(struct device *dev,
                                         struct device_attribute *attr,
@@ -2263,7 +2251,8 @@ static ssize_t provides_device_sdrs_show(struct device *dev,
        return snprintf(buf, 10, "%u\n",
                        (bmc->id.device_revision & 0x80) >> 7);
 }
-DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, NULL);
+static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show,
+                  NULL);
 
 static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
                             char *buf)
@@ -2273,7 +2262,7 @@ static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
        return snprintf(buf, 20, "%u\n",
                        bmc->id.device_revision & 0x0F);
 }
-DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
+static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
 
 static ssize_t firmware_revision_show(struct device *dev,
                                      struct device_attribute *attr,
@@ -2284,7 +2273,7 @@ static ssize_t firmware_revision_show(struct device *dev,
        return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
                        bmc->id.firmware_revision_2);
 }
-DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
+static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
 
 static ssize_t ipmi_version_show(struct device *dev,
                                 struct device_attribute *attr,
@@ -2296,7 +2285,7 @@ static ssize_t ipmi_version_show(struct device *dev,
                        ipmi_version_major(&bmc->id),
                        ipmi_version_minor(&bmc->id));
 }
-DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
+static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
 
 static ssize_t add_dev_support_show(struct device *dev,
                                    struct device_attribute *attr,
@@ -2307,7 +2296,8 @@ static ssize_t add_dev_support_show(struct device *dev,
        return snprintf(buf, 10, "0x%02x\n",
                        bmc->id.additional_device_support);
 }
-DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, NULL);
+static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
+                  NULL);
 
 static ssize_t manufacturer_id_show(struct device *dev,
                                    struct device_attribute *attr,
@@ -2317,7 +2307,7 @@ static ssize_t manufacturer_id_show(struct device *dev,
 
        return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
 }
-DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
+static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
 
 static ssize_t product_id_show(struct device *dev,
                               struct device_attribute *attr,
@@ -2327,7 +2317,7 @@ static ssize_t product_id_show(struct device *dev,
 
        return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
 }
-DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
+static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
 
 static ssize_t aux_firmware_rev_show(struct device *dev,
                                     struct device_attribute *attr,
@@ -2341,7 +2331,7 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
                        bmc->id.aux_firmware_revision[1],
                        bmc->id.aux_firmware_revision[0]);
 }
-DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
+static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
 
 static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
@@ -2352,7 +2342,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
                        (long long) bmc->guid[0],
                        (long long) bmc->guid[8]);
 }
-DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
+static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
 
 static struct attribute *bmc_dev_attrs[] = {
        &dev_attr_device_id.attr,
@@ -2392,10 +2382,10 @@ cleanup_bmc_device(struct kref *ref)
 
        if (bmc->id.aux_firmware_revision_set)
                device_remove_file(&bmc->pdev.dev,
-                                  &bmc->aux_firmware_rev_attr);
+                                  &dev_attr_aux_firmware_revision);
        if (bmc->guid_set)
                device_remove_file(&bmc->pdev.dev,
-                                  &bmc->guid_attr);
+                                  &dev_attr_guid);
 
        platform_device_unregister(&bmc->pdev);
 }
@@ -2422,16 +2412,14 @@ static int create_bmc_files(struct bmc_device *bmc)
        int err;
 
        if (bmc->id.aux_firmware_revision_set) {
-               bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
                err = device_create_file(&bmc->pdev.dev,
-                                  &bmc->aux_firmware_rev_attr);
+                                        &dev_attr_aux_firmware_revision);
                if (err)
                        goto out;
        }
        if (bmc->guid_set) {
-               bmc->guid_attr.attr.name = "guid";
                err = device_create_file(&bmc->pdev.dev,
-                                  &bmc->guid_attr);
+                                        &dev_attr_guid);
                if (err)
                        goto out_aux_firm;
        }
@@ -2441,7 +2429,7 @@ static int create_bmc_files(struct bmc_device *bmc)
 out_aux_firm:
        if (bmc->id.aux_firmware_revision_set)
                device_remove_file(&bmc->pdev.dev,
-                                  &bmc->aux_firmware_rev_attr);
+                                  &dev_attr_aux_firmware_revision);
 out:
        return err;
 }
index e178ac27e73c6de46ef48c2d94464cd64b4ca011..fd5a5e85d7dc604e2ebe237ee34ce9355d81d866 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/dmi.h>
 #include <linux/kthread.h>
 #include <linux/acpi.h>
+#include <linux/ctype.h>
 
 #define PFX "ipmi_ssif: "
 #define DEVICE_NAME "ipmi_ssif"
index 62e2509f9df16d52352da1d4aaa13b06d65b4824..bbdb1b985c9146a5e82013fd8d8ab20ea3ebc5fc 100644 (file)
@@ -57,7 +57,7 @@ static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw,
 static long clk_programmable_determine_rate(struct clk_hw *hw,
                                            unsigned long rate,
                                            unsigned long *best_parent_rate,
-                                           struct clk **best_parent_clk)
+                                           struct clk_hw **best_parent_hw)
 {
        struct clk *parent = NULL;
        long best_rate = -EINVAL;
@@ -84,7 +84,7 @@ static long clk_programmable_determine_rate(struct clk_hw *hw,
                if (best_rate < 0 || (rate - tmp_rate) < (rate - best_rate)) {
                        best_rate = tmp_rate;
                        *best_parent_rate = parent_rate;
-                       *best_parent_clk = parent;
+                       *best_parent_hw = __clk_get_hw(parent);
                }
 
                if (!best_rate)
index 95af2e665dd33cf769da657fc918949ef04a36f3..1c06f6f3a8c59959b90e90f554c048e1535a1893 100644 (file)
@@ -1032,7 +1032,7 @@ static long kona_peri_clk_round_rate(struct clk_hw *hw, unsigned long rate,
 }
 
 static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
-               unsigned long *best_parent_rate, struct clk **best_parent)
+               unsigned long *best_parent_rate, struct clk_hw **best_parent)
 {
        struct kona_clk *bcm_clk = to_kona_clk(hw);
        struct clk *clk = hw->clk;
@@ -1075,7 +1075,7 @@ static long kona_peri_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
                if (delta < best_delta) {
                        best_delta = delta;
                        best_rate = other_rate;
-                       *best_parent = parent;
+                       *best_parent = __clk_get_hw(parent);
                        *best_parent_rate = parent_rate;
                }
        }
index b9355daf8065fcbb6de7db2ca9cbb3593134c892..4386697236a78dc23aea66d0c4792873d558f71e 100644 (file)
@@ -57,7 +57,7 @@ static unsigned long clk_composite_recalc_rate(struct clk_hw *hw,
 
 static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
                                        unsigned long *best_parent_rate,
-                                       struct clk **best_parent_p)
+                                       struct clk_hw **best_parent_p)
 {
        struct clk_composite *composite = to_clk_composite(hw);
        const struct clk_ops *rate_ops = composite->rate_ops;
@@ -80,8 +80,9 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
                *best_parent_p = NULL;
 
                if (__clk_get_flags(hw->clk) & CLK_SET_RATE_NO_REPARENT) {
-                       *best_parent_p = clk_get_parent(mux_hw->clk);
-                       *best_parent_rate = __clk_get_rate(*best_parent_p);
+                       parent = clk_get_parent(mux_hw->clk);
+                       *best_parent_p = __clk_get_hw(parent);
+                       *best_parent_rate = __clk_get_rate(parent);
 
                        return rate_ops->round_rate(rate_hw, rate,
                                                    best_parent_rate);
@@ -103,7 +104,7 @@ static long clk_composite_determine_rate(struct clk_hw *hw, unsigned long rate,
 
                        if (!rate_diff || !*best_parent_p
                                       || best_rate_diff > rate_diff) {
-                               *best_parent_p = parent;
+                               *best_parent_p = __clk_get_hw(parent);
                                *best_parent_rate = parent_rate;
                                best_rate_diff = rate_diff;
                                best_rate = tmp_rate;
index 4f96ff3ba728321563cbdc6b728f9a25c65d74c6..6e1ecf94bf58daa279cb47e42065da9a9db3c581 100644 (file)
@@ -77,7 +77,7 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
 
        else {
                if (mux->flags & CLK_MUX_INDEX_BIT)
-                       index = (1 << ffs(index));
+                       index = 1 << index;
 
                if (mux->flags & CLK_MUX_INDEX_ONE)
                        index++;
index 87a41038237d64634a2372231ebf08654ea7c1a5..bfa1e64e267d36a9286d5701069305133b5a092c 100644 (file)
@@ -218,7 +218,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
        default:
                dev_err(&pdev->dev, "Invalid device type\n");
                return -EINVAL;
-       };
+       }
 
        /* Store clocks of_node in first element of s2mps11_clks array */
        s2mps11_clks->clk_np = s2mps11_clk_parse_dt(pdev, clks_init);
index 4896ae9e23da0c23ed52872cf1992f95609e1b59..f4963b7d4e17d41b6a6553854c5250a7e90bfdef 100644 (file)
@@ -240,7 +240,6 @@ static const struct file_operations clk_dump_fops = {
        .release        = single_release,
 };
 
-/* caller must hold prepare_lock */
 static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
 {
        struct dentry *d;
@@ -354,13 +353,13 @@ out:
        mutex_unlock(&clk_debug_lock);
 }
 
-struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode,
+struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
                                void *data, const struct file_operations *fops)
 {
        struct dentry *d = NULL;
 
-       if (clk->dentry)
-               d = debugfs_create_file(name, mode, clk->dentry, data, fops);
+       if (hw->clk->dentry)
+               d = debugfs_create_file(name, mode, hw->clk->dentry, data, fops);
 
        return d;
 }
@@ -574,11 +573,6 @@ unsigned int __clk_get_enable_count(struct clk *clk)
        return !clk ? 0 : clk->enable_count;
 }
 
-unsigned int __clk_get_prepare_count(struct clk *clk)
-{
-       return !clk ? 0 : clk->prepare_count;
-}
-
 unsigned long __clk_get_rate(struct clk *clk)
 {
        unsigned long ret;
@@ -601,7 +595,7 @@ out:
 }
 EXPORT_SYMBOL_GPL(__clk_get_rate);
 
-unsigned long __clk_get_accuracy(struct clk *clk)
+static unsigned long __clk_get_accuracy(struct clk *clk)
 {
        if (!clk)
                return 0;
@@ -707,7 +701,7 @@ struct clk *__clk_lookup(const char *name)
  */
 long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
                              unsigned long *best_parent_rate,
-                             struct clk **best_parent_p)
+                             struct clk_hw **best_parent_p)
 {
        struct clk *clk = hw->clk, *parent, *best_parent = NULL;
        int i, num_parents;
@@ -743,7 +737,7 @@ long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
 
 out:
        if (best_parent)
-               *best_parent_p = best_parent;
+               *best_parent_p = best_parent->hw;
        *best_parent_rate = best;
 
        return best;
@@ -951,6 +945,7 @@ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
 {
        unsigned long parent_rate = 0;
        struct clk *parent;
+       struct clk_hw *parent_hw;
 
        if (!clk)
                return 0;
@@ -959,10 +954,11 @@ unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
        if (parent)
                parent_rate = parent->rate;
 
-       if (clk->ops->determine_rate)
+       if (clk->ops->determine_rate) {
+               parent_hw = parent ? parent->hw : NULL;
                return clk->ops->determine_rate(clk->hw, rate, &parent_rate,
-                                               &parent);
-       else if (clk->ops->round_rate)
+                                               &parent_hw);
+       } else if (clk->ops->round_rate)
                return clk->ops->round_rate(clk->hw, rate, &parent_rate);
        else if (clk->flags & CLK_SET_RATE_PARENT)
                return __clk_round_rate(clk->parent, rate);
@@ -1350,6 +1346,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
 {
        struct clk *top = clk;
        struct clk *old_parent, *parent;
+       struct clk_hw *parent_hw;
        unsigned long best_parent_rate = 0;
        unsigned long new_rate;
        int p_index = 0;
@@ -1365,9 +1362,11 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
 
        /* find the closest rate and parent clk/rate */
        if (clk->ops->determine_rate) {
+               parent_hw = parent ? parent->hw : NULL;
                new_rate = clk->ops->determine_rate(clk->hw, rate,
                                                    &best_parent_rate,
-                                                   &parent);
+                                                   &parent_hw);
+               parent = parent_hw->clk;
        } else if (clk->ops->round_rate) {
                new_rate = clk->ops->round_rate(clk->hw, rate,
                                                &best_parent_rate);
@@ -1614,7 +1613,7 @@ static struct clk *__clk_init_parent(struct clk *clk)
 
        if (clk->num_parents == 1) {
                if (IS_ERR_OR_NULL(clk->parent))
-                       ret = clk->parent = __clk_lookup(clk->parent_names[0]);
+                       clk->parent = __clk_lookup(clk->parent_names[0]);
                ret = clk->parent;
                goto out;
        }
@@ -1944,7 +1943,6 @@ int __clk_init(struct device *dev, struct clk *clk)
        else
                clk->rate = 0;
 
-       clk_debug_register(clk);
        /*
         * walk the list of orphan clocks and reparent any that are children of
         * this clock
@@ -1979,6 +1977,9 @@ int __clk_init(struct device *dev, struct clk *clk)
 out:
        clk_prepare_unlock();
 
+       if (!ret)
+               clk_debug_register(clk);
+
        return ret;
 }
 
@@ -2273,14 +2274,17 @@ int __clk_get(struct clk *clk)
 
 void __clk_put(struct clk *clk)
 {
+       struct module *owner;
+
        if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
                return;
 
        clk_prepare_lock();
+       owner = clk->owner;
        kref_put(&clk->ref, __clk_release);
        clk_prepare_unlock();
 
-       module_put(clk->owner);
+       module_put(owner);
 }
 
 /***        clk rate change notifiers        ***/
index 339945d2503b33798ca4e98b51e140f2b8f658ae..007144f81f50b63301f211103dd200f7f21a4ec4 100644 (file)
 #include "clk.h"
 
 /* clock parent list */
-static const char *timer0_mux_p[] __initdata = { "osc32k", "timerclk01", };
-static const char *timer1_mux_p[] __initdata = { "osc32k", "timerclk01", };
-static const char *timer2_mux_p[] __initdata = { "osc32k", "timerclk23", };
-static const char *timer3_mux_p[] __initdata = { "osc32k", "timerclk23", };
-static const char *timer4_mux_p[] __initdata = { "osc32k", "timerclk45", };
-static const char *timer5_mux_p[] __initdata = { "osc32k", "timerclk45", };
-static const char *timer6_mux_p[] __initdata = { "osc32k", "timerclk67", };
-static const char *timer7_mux_p[] __initdata = { "osc32k", "timerclk67", };
-static const char *timer8_mux_p[] __initdata = { "osc32k", "timerclk89", };
-static const char *timer9_mux_p[] __initdata = { "osc32k", "timerclk89", };
-static const char *uart0_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart1_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart2_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart3_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *uart4_mux_p[] __initdata = { "osc26m", "pclk", };
-static const char *spi0_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
-static const char *spi1_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
-static const char *spi2_mux_p[] __initdata = { "osc26m", "rclk_cfgaxi", };
+static const char *timer0_mux_p[] __initconst = { "osc32k", "timerclk01", };
+static const char *timer1_mux_p[] __initconst = { "osc32k", "timerclk01", };
+static const char *timer2_mux_p[] __initconst = { "osc32k", "timerclk23", };
+static const char *timer3_mux_p[] __initconst = { "osc32k", "timerclk23", };
+static const char *timer4_mux_p[] __initconst = { "osc32k", "timerclk45", };
+static const char *timer5_mux_p[] __initconst = { "osc32k", "timerclk45", };
+static const char *timer6_mux_p[] __initconst = { "osc32k", "timerclk67", };
+static const char *timer7_mux_p[] __initconst = { "osc32k", "timerclk67", };
+static const char *timer8_mux_p[] __initconst = { "osc32k", "timerclk89", };
+static const char *timer9_mux_p[] __initconst = { "osc32k", "timerclk89", };
+static const char *uart0_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *uart1_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *uart2_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *uart3_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *uart4_mux_p[] __initconst = { "osc26m", "pclk", };
+static const char *spi0_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
+static const char *spi1_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
+static const char *spi2_mux_p[] __initconst = { "osc26m", "rclk_cfgaxi", };
 /* share axi parent */
-static const char *saxi_mux_p[] __initdata = { "armpll3", "armpll2", };
-static const char *pwm0_mux_p[] __initdata = { "osc32k", "osc26m", };
-static const char *pwm1_mux_p[] __initdata = { "osc32k", "osc26m", };
-static const char *sd_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *mmc1_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *mmc1_mux2_p[] __initdata = { "osc26m", "mmc1_div", };
-static const char *g2d_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *venc_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *vdec_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *vpp_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *edc0_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *ldi0_mux_p[] __initdata = { "armpll2", "armpll4",
+static const char *saxi_mux_p[] __initconst = { "armpll3", "armpll2", };
+static const char *pwm0_mux_p[] __initconst = { "osc32k", "osc26m", };
+static const char *pwm1_mux_p[] __initconst = { "osc32k", "osc26m", };
+static const char *sd_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *mmc1_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *mmc1_mux2_p[] __initconst = { "osc26m", "mmc1_div", };
+static const char *g2d_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *venc_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *vdec_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *vpp_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *edc0_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *ldi0_mux_p[] __initconst = { "armpll2", "armpll4",
                                             "armpll3", "armpll5", };
-static const char *edc1_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *ldi1_mux_p[] __initdata = { "armpll2", "armpll4",
+static const char *edc1_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *ldi1_mux_p[] __initconst = { "armpll2", "armpll4",
                                             "armpll3", "armpll5", };
-static const char *rclk_hsic_p[] __initdata = { "armpll3", "armpll2", };
-static const char *mmc2_mux_p[] __initdata = { "armpll2", "armpll3", };
-static const char *mmc3_mux_p[] __initdata = { "armpll2", "armpll3", };
+static const char *rclk_hsic_p[] __initconst = { "armpll3", "armpll2", };
+static const char *mmc2_mux_p[] __initconst = { "armpll2", "armpll3", };
+static const char *mmc3_mux_p[] __initconst = { "armpll2", "armpll3", };
 
 
 /* fixed rate clocks */
@@ -296,7 +296,7 @@ static unsigned long mmc_clk_recalc_rate(struct clk_hw *hw,
 
 static long mmc_clk_determine_rate(struct clk_hw *hw, unsigned long rate,
                              unsigned long *best_parent_rate,
-                             struct clk **best_parent_p)
+                             struct clk_hw **best_parent_p)
 {
        struct clk_mmc *mclk = to_mmc(hw);
        unsigned long best = 0;
index 392d78044ce3748903ad1614ffc4589a7cdb168c..3caaf7cc169c684559973f105317dea76bbd1d54 100644 (file)
@@ -2,7 +2,12 @@
 # Makefile for mmp specific clk
 #
 
-obj-y += clk-apbc.o clk-apmu.o clk-frac.o
+obj-y += clk-apbc.o clk-apmu.o clk-frac.o clk-mix.o clk-gate.o clk.o
+
+obj-$(CONFIG_RESET_CONTROLLER) += reset.o
+
+obj-$(CONFIG_MACH_MMP_DT) += clk-of-pxa168.o clk-of-pxa910.o
+obj-$(CONFIG_MACH_MMP2_DT) += clk-of-mmp2.o
 
 obj-$(CONFIG_CPU_PXA168) += clk-pxa168.o
 obj-$(CONFIG_CPU_PXA910) += clk-pxa910.o
index 23a56f561812decc149624fe72b803a7bbb726ab..584a9927993b41f73df73f40ec598f4e7d45ca1d 100644 (file)
  * numerator/denominator = Fin / (Fout * factor)
  */
 
-#define to_clk_factor(hw) container_of(hw, struct clk_factor, hw)
-struct clk_factor {
-       struct clk_hw           hw;
-       void __iomem            *base;
-       struct clk_factor_masks *masks;
-       struct clk_factor_tbl   *ftbl;
-       unsigned int            ftbl_cnt;
-};
+#define to_clk_factor(hw) container_of(hw, struct mmp_clk_factor, hw)
 
 static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
                unsigned long *prate)
 {
-       struct clk_factor *factor = to_clk_factor(hw);
+       struct mmp_clk_factor *factor = to_clk_factor(hw);
        unsigned long rate = 0, prev_rate;
        int i;
 
@@ -58,8 +51,8 @@ static long clk_factor_round_rate(struct clk_hw *hw, unsigned long drate,
 static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
                unsigned long parent_rate)
 {
-       struct clk_factor *factor = to_clk_factor(hw);
-       struct clk_factor_masks *masks = factor->masks;
+       struct mmp_clk_factor *factor = to_clk_factor(hw);
+       struct mmp_clk_factor_masks *masks = factor->masks;
        unsigned int val, num, den;
 
        val = readl_relaxed(factor->base);
@@ -81,11 +74,12 @@ static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
 static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
                                unsigned long prate)
 {
-       struct clk_factor *factor = to_clk_factor(hw);
-       struct clk_factor_masks *masks = factor->masks;
+       struct mmp_clk_factor *factor = to_clk_factor(hw);
+       struct mmp_clk_factor_masks *masks = factor->masks;
        int i;
        unsigned long val;
        unsigned long prev_rate, rate = 0;
+       unsigned long flags = 0;
 
        for (i = 0; i < factor->ftbl_cnt; i++) {
                prev_rate = rate;
@@ -97,6 +91,9 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
        if (i > 0)
                i--;
 
+       if (factor->lock)
+               spin_lock_irqsave(factor->lock, flags);
+
        val = readl_relaxed(factor->base);
 
        val &= ~(masks->num_mask << masks->num_shift);
@@ -107,21 +104,65 @@ static int clk_factor_set_rate(struct clk_hw *hw, unsigned long drate,
 
        writel_relaxed(val, factor->base);
 
+       if (factor->lock)
+               spin_unlock_irqrestore(factor->lock, flags);
+
        return 0;
 }
 
+static void clk_factor_init(struct clk_hw *hw)
+{
+       struct mmp_clk_factor *factor = to_clk_factor(hw);
+       struct mmp_clk_factor_masks *masks = factor->masks;
+       u32 val, num, den;
+       int i;
+       unsigned long flags = 0;
+
+       if (factor->lock)
+               spin_lock_irqsave(factor->lock, flags);
+
+       val = readl(factor->base);
+
+       /* calculate numerator */
+       num = (val >> masks->num_shift) & masks->num_mask;
+
+       /* calculate denominator */
+       den = (val >> masks->den_shift) & masks->den_mask;
+
+       for (i = 0; i < factor->ftbl_cnt; i++)
+               if (den == factor->ftbl[i].den && num == factor->ftbl[i].num)
+                       break;
+
+       if (i >= factor->ftbl_cnt) {
+               val &= ~(masks->num_mask << masks->num_shift);
+               val |= (factor->ftbl[0].num & masks->num_mask) <<
+                       masks->num_shift;
+
+               val &= ~(masks->den_mask << masks->den_shift);
+               val |= (factor->ftbl[0].den & masks->den_mask) <<
+                       masks->den_shift;
+
+               writel(val, factor->base);
+       }
+
+       if (factor->lock)
+               spin_unlock_irqrestore(factor->lock, flags);
+}
+
 static struct clk_ops clk_factor_ops = {
        .recalc_rate = clk_factor_recalc_rate,
        .round_rate = clk_factor_round_rate,
        .set_rate = clk_factor_set_rate,
+       .init = clk_factor_init,
 };
 
 struct clk *mmp_clk_register_factor(const char *name, const char *parent_name,
                unsigned long flags, void __iomem *base,
-               struct clk_factor_masks *masks, struct clk_factor_tbl *ftbl,
-               unsigned int ftbl_cnt)
+               struct mmp_clk_factor_masks *masks,
+               struct mmp_clk_factor_tbl *ftbl,
+               unsigned int ftbl_cnt, spinlock_t *lock)
 {
-       struct clk_factor *factor;
+       struct mmp_clk_factor *factor;
        struct clk_init_data init;
        struct clk *clk;
 
@@ -142,6 +183,7 @@ struct clk *mmp_clk_register_factor(const char *name, const char *parent_name,
        factor->ftbl = ftbl;
        factor->ftbl_cnt = ftbl_cnt;
        factor->hw.init = &init;
+       factor->lock = lock;
 
        init.name = name;
        init.ops = &clk_factor_ops;
diff --git a/drivers/clk/mmp/clk-gate.c b/drivers/clk/mmp/clk-gate.c
new file mode 100644 (file)
index 0000000..adbd9d6
--- /dev/null
@@ -0,0 +1,133 @@
+/*
+ * mmp gate clock operation source file
+ *
+ * Copyright (C) 2014 Marvell
+ * Chao Xie <chao.xie@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include "clk.h"
+
+/*
+ * Some clocks will have mutiple bits to enable the clocks, and
+ * the bits to disable the clock is not same as enabling bits.
+ */
+
+#define to_clk_mmp_gate(hw)    container_of(hw, struct mmp_clk_gate, hw)
+
+static int mmp_clk_gate_enable(struct clk_hw *hw)
+{
+       struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
+       struct clk *clk = hw->clk;
+       unsigned long flags = 0;
+       unsigned long rate;
+       u32 tmp;
+
+       if (gate->lock)
+               spin_lock_irqsave(gate->lock, flags);
+
+       tmp = readl(gate->reg);
+       tmp &= ~gate->mask;
+       tmp |= gate->val_enable;
+       writel(tmp, gate->reg);
+
+       if (gate->lock)
+               spin_unlock_irqrestore(gate->lock, flags);
+
+       if (gate->flags & MMP_CLK_GATE_NEED_DELAY) {
+               rate = __clk_get_rate(clk);
+               /* Need delay 2 cycles. */
+               udelay(2000000/rate);
+       }
+
+       return 0;
+}
+
+static void mmp_clk_gate_disable(struct clk_hw *hw)
+{
+       struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
+       unsigned long flags = 0;
+       u32 tmp;
+
+       if (gate->lock)
+               spin_lock_irqsave(gate->lock, flags);
+
+       tmp = readl(gate->reg);
+       tmp &= ~gate->mask;
+       tmp |= gate->val_disable;
+       writel(tmp, gate->reg);
+
+       if (gate->lock)
+               spin_unlock_irqrestore(gate->lock, flags);
+}
+
+static int mmp_clk_gate_is_enabled(struct clk_hw *hw)
+{
+       struct mmp_clk_gate *gate = to_clk_mmp_gate(hw);
+       unsigned long flags = 0;
+       u32 tmp;
+
+       if (gate->lock)
+               spin_lock_irqsave(gate->lock, flags);
+
+       tmp = readl(gate->reg);
+
+       if (gate->lock)
+               spin_unlock_irqrestore(gate->lock, flags);
+
+       return (tmp & gate->mask) == gate->val_enable;
+}
+
+const struct clk_ops mmp_clk_gate_ops = {
+       .enable = mmp_clk_gate_enable,
+       .disable = mmp_clk_gate_disable,
+       .is_enabled = mmp_clk_gate_is_enabled,
+};
+
+struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
+               const char *parent_name, unsigned long flags,
+               void __iomem *reg, u32 mask, u32 val_enable, u32 val_disable,
+               unsigned int gate_flags, spinlock_t *lock)
+{
+       struct mmp_clk_gate *gate;
+       struct clk *clk;
+       struct clk_init_data init;
+
+       /* allocate the gate */
+       gate = kzalloc(sizeof(*gate), GFP_KERNEL);
+       if (!gate) {
+               pr_err("%s:%s could not allocate gate clk\n", __func__, name);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       init.name = name;
+       init.ops = &mmp_clk_gate_ops;
+       init.flags = flags | CLK_IS_BASIC;
+       init.parent_names = (parent_name ? &parent_name : NULL);
+       init.num_parents = (parent_name ? 1 : 0);
+
+       /* struct clk_gate assignments */
+       gate->reg = reg;
+       gate->mask = mask;
+       gate->val_enable = val_enable;
+       gate->val_disable = val_disable;
+       gate->flags = gate_flags;
+       gate->lock = lock;
+       gate->hw.init = &init;
+
+       clk = clk_register(dev, &gate->hw);
+
+       if (IS_ERR(clk))
+               kfree(gate);
+
+       return clk;
+}
diff --git a/drivers/clk/mmp/clk-mix.c b/drivers/clk/mmp/clk-mix.c
new file mode 100644 (file)
index 0000000..48fa53c
--- /dev/null
@@ -0,0 +1,513 @@
+/*
+ * mmp mix(div and mux) clock operation source file
+ *
+ * Copyright (C) 2014 Marvell
+ * Chao Xie <chao.xie@marvell.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/err.h>
+
+#include "clk.h"
+
+/*
+ * The mix clock is a clock that combines a mux and a divider.
+ * Because the div field and the mux field need to be set at the
+ * same time, it cannot be split into two separate clock types.
+ */
+
+#define to_clk_mix(hw) container_of(hw, struct mmp_clk_mix, hw)
+
+/* Largest divisor this mix clock can program, per its divider flags/table. */
+static unsigned int _get_maxdiv(struct mmp_clk_mix *mix)
+{
+       unsigned int div_mask = (1 << mix->reg_info.width_div) - 1;
+       unsigned int biggest = 0;
+       struct clk_div_table *entry;
+
+       if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
+               return div_mask;
+       if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
+               return 1 << div_mask;
+
+       if (!mix->div_table)
+               return div_mask + 1;
+
+       /* Table-driven divider: scan for the largest listed divisor. */
+       for (entry = mix->div_table; entry->div; entry++)
+               if (entry->div > biggest)
+                       biggest = entry->div;
+
+       return biggest;
+}
+
+/*
+ * Decode a raw divider register field value into the actual divisor.
+ * A table-driven divider yields 0 when the value is not in the table.
+ */
+static unsigned int _get_div(struct mmp_clk_mix *mix, unsigned int val)
+{
+       struct clk_div_table *entry;
+
+       if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
+               return val;
+       if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
+               return 1 << val;
+
+       if (mix->div_table) {
+               for (entry = mix->div_table; entry->div; entry++)
+                       if (entry->val == val)
+                               return entry->div;
+               /* Not described by the table. */
+               return 0;
+       }
+
+       return val + 1;
+}
+
+/*
+ * Decode a raw mux register field value into a parent index.
+ * With a mux table, an unmatched value falls back to index 0.
+ */
+static unsigned int _get_mux(struct mmp_clk_mix *mix, unsigned int val)
+{
+       int num_parents = __clk_get_num_parents(mix->hw.clk);
+       int i;
+
+       if (mix->mux_flags & CLK_MUX_INDEX_BIT)
+               return ffs(val) - 1;
+       if (mix->mux_flags & CLK_MUX_INDEX_ONE)
+               return val - 1;
+
+       if (!mix->mux_table)
+               return val;
+
+       for (i = 0; i < num_parents; i++)
+               if (mix->mux_table[i] == val)
+                       return i;
+
+       /* Value not present in the table. */
+       return 0;
+}
+/*
+ * Encode a divisor into the raw divider register field value.
+ * A table-driven divider yields 0 when the divisor is not listed.
+ */
+static unsigned int _get_div_val(struct mmp_clk_mix *mix, unsigned int div)
+{
+       struct clk_div_table *entry;
+
+       if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
+               return div;
+       if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
+               return __ffs(div);
+
+       if (mix->div_table) {
+               for (entry = mix->div_table; entry->div; entry++)
+                       if (entry->div == div)
+                               return entry->val;
+               /* Divisor not described by the table. */
+               return 0;
+       }
+
+       return div - 1;
+}
+
+/* Encode a parent index into the raw mux field value (table or identity). */
+static unsigned int _get_mux_val(struct mmp_clk_mix *mix, unsigned int mux)
+{
+       return mix->mux_table ? mix->mux_table[mux] : mux;
+}
+
+/*
+ * Validate the platform-provided rate table against the currently
+ * registered parents: an entry is usable only when its parent rate is an
+ * exact integer multiple of the target rate.  The computed divisor is
+ * cached in the entry for later use.
+ */
+static void _filter_clk_table(struct mmp_clk_mix *mix,
+                               struct mmp_clk_mix_clk_table *table,
+                               unsigned int table_size)
+{
+       int i;
+       struct mmp_clk_mix_clk_table *item;
+       struct clk *parent, *clk;
+       unsigned long parent_rate;
+
+       clk = mix->hw.clk;
+
+       for (i = 0; i < table_size; i++) {
+               item = &table[i];
+               parent = clk_get_parent_by_index(clk, item->parent_index);
+               parent_rate = __clk_get_rate(parent);
+               /*
+                * A zero target rate would fault on the modulo below;
+                * treat it as an unusable entry instead of crashing.
+                */
+               if (!item->rate || parent_rate % item->rate) {
+                       item->valid = 0;
+               } else {
+                       item->divisor = parent_rate / item->rate;
+                       item->valid = 1;
+               }
+       }
+}
+
+/*
+ * Program the mux and/or divider fields, following the frequency-change
+ * protocol of the given mix-clock variant:
+ *   V1: plain write of the combined field to reg_clk_ctrl.
+ *   V2: write with the FC (frequency-change) request bit set, then poll
+ *       until hardware clears it (bounded by ~50 reads).
+ *   V3: raise FC in reg_clk_ctrl, then write the fields to reg_clk_sel.
+ * Returns 0 on success, -EINVAL if nothing to change, -EBUSY if the V2
+ * frequency change does not complete.
+ */
+static int _set_rate(struct mmp_clk_mix *mix, u32 mux_val, u32 div_val,
+                       unsigned int change_mux, unsigned int change_div)
+{
+       struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
+       u8 width, shift;
+       u32 mux_div, fc_req;
+       int ret, timeout = 50;
+       unsigned long flags = 0;
+
+       if (!change_mux && !change_div)
+               return -EINVAL;
+
+       if (mix->lock)
+               spin_lock_irqsave(mix->lock, flags);
+
+       /* V1/V2 keep mux+div in the control register; V3 in the select reg. */
+       if (mix->type == MMP_CLK_MIX_TYPE_V1
+               || mix->type == MMP_CLK_MIX_TYPE_V2)
+               mux_div = readl(ri->reg_clk_ctrl);
+       else
+               mux_div = readl(ri->reg_clk_sel);
+
+       /* Read-modify-write only the requested field(s). */
+       if (change_div) {
+               width = ri->width_div;
+               shift = ri->shift_div;
+               mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
+               mux_div |= MMP_CLK_BITS_SET_VAL(div_val, width, shift);
+       }
+
+       if (change_mux) {
+               width = ri->width_mux;
+               shift = ri->shift_mux;
+               mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
+               mux_div |= MMP_CLK_BITS_SET_VAL(mux_val, width, shift);
+       }
+
+       if (mix->type == MMP_CLK_MIX_TYPE_V1) {
+               writel(mux_div, ri->reg_clk_ctrl);
+       } else if (mix->type == MMP_CLK_MIX_TYPE_V2) {
+               mux_div |= (1 << ri->bit_fc);
+               writel(mux_div, ri->reg_clk_ctrl);
+
+               /* Busy-wait for hardware to acknowledge (clear FC bit). */
+               do {
+                       fc_req = readl(ri->reg_clk_ctrl);
+                       timeout--;
+                       if (!(fc_req & (1 << ri->bit_fc)))
+                               break;
+               } while (timeout);
+
+               if (timeout == 0) {
+                       pr_err("%s:%s cannot do frequency change\n",
+                               __func__, __clk_get_name(mix->hw.clk));
+                       ret = -EBUSY;
+                       goto error;
+               }
+       } else {
+               fc_req = readl(ri->reg_clk_ctrl);
+               fc_req |= 1 << ri->bit_fc;
+               writel(fc_req, ri->reg_clk_ctrl);
+               writel(mux_div, ri->reg_clk_sel);
+               /*
+                * NOTE(review): the cleared fc_req below is never written
+                * back to the register — either the FC bit self-clears or a
+                * final writel() is missing here; confirm against the V3
+                * hardware manual.
+                */
+               fc_req &= ~(1 << ri->bit_fc);
+       }
+
+       ret = 0;
+error:
+       if (mix->lock)
+               spin_unlock_irqrestore(mix->lock, flags);
+
+       return ret;
+}
+
+/*
+ * Pick the parent/divisor combination whose output is closest to @rate.
+ * With a rate table only valid (pre-filtered) entries are considered;
+ * otherwise every parent is tried with every programmable divisor.
+ *
+ * Returns the best achievable rate (and fills *best_parent_rate /
+ * *best_parent_clk), or -EINVAL when no candidate exists at all.
+ */
+static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
+                                       unsigned long *best_parent_rate,
+                                       struct clk_hw **best_parent_clk)
+{
+       struct mmp_clk_mix *mix = to_clk_mix(hw);
+       struct mmp_clk_mix_clk_table *item;
+       struct clk *parent, *parent_best, *mix_clk;
+       unsigned long parent_rate, mix_rate, mix_rate_best, parent_rate_best;
+       unsigned long gap, gap_best;
+       u32 div_val_max;
+       unsigned int div;
+       int i, j;
+
+       mix_clk = hw->clk;
+
+       parent = NULL;
+       mix_rate_best = 0;
+       parent_rate_best = 0;
+       gap_best = rate;
+       parent_best = NULL;
+
+       if (mix->table) {
+               for (i = 0; i < mix->table_size; i++) {
+                       item = &mix->table[i];
+                       if (item->valid == 0)
+                               continue;
+                       parent = clk_get_parent_by_index(mix_clk,
+                                                       item->parent_index);
+                       parent_rate = __clk_get_rate(parent);
+                       mix_rate = parent_rate / item->divisor;
+                       /* unsigned-safe distance; abs() would wrap here */
+                       gap = (mix_rate > rate) ? mix_rate - rate
+                                               : rate - mix_rate;
+                       if (parent_best == NULL || gap < gap_best) {
+                               parent_best = parent;
+                               parent_rate_best = parent_rate;
+                               mix_rate_best = mix_rate;
+                               gap_best = gap;
+                               if (gap_best == 0)
+                                       goto found;
+                       }
+               }
+       } else {
+               for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
+                       parent = clk_get_parent_by_index(mix_clk, i);
+                       parent_rate = __clk_get_rate(parent);
+                       div_val_max = _get_maxdiv(mix);
+                       for (j = 0; j < div_val_max; j++) {
+                               div = _get_div(mix, j);
+                               mix_rate = parent_rate / div;
+                               gap = (mix_rate > rate) ? mix_rate - rate
+                                                       : rate - mix_rate;
+                               if (parent_best == NULL || gap < gap_best) {
+                                       parent_best = parent;
+                                       parent_rate_best = parent_rate;
+                                       mix_rate_best = mix_rate;
+                                       gap_best = gap;
+                                       if (gap_best == 0)
+                                               goto found;
+                               }
+                       }
+               }
+       }
+
+found:
+       /* No usable candidate (empty/filtered table or no parents). */
+       if (!parent_best)
+               return -EINVAL;
+
+       *best_parent_rate = parent_rate_best;
+       *best_parent_clk = __clk_get_hw(parent_best);
+
+       return mix_rate_best;
+}
+
+/*
+ * Switch parent and divisor in a single atomic register update.
+ * The divisor is derived as parent_rate / rate (integer division).
+ */
+static int mmp_clk_mix_set_rate_and_parent(struct clk_hw *hw,
+                                               unsigned long rate,
+                                               unsigned long parent_rate,
+                                               u8 index)
+{
+       struct mmp_clk_mix *mix = to_clk_mix(hw);
+       unsigned int div;
+       u32 div_val, mux_val;
+
+       /* Guard the divisor computation against a zero target rate. */
+       if (!rate)
+               return -EINVAL;
+
+       div = parent_rate / rate;
+       div_val = _get_div_val(mix, div);
+       mux_val = _get_mux_val(mix, index);
+
+       return _set_rate(mix, mux_val, div_val, 1, 1);
+}
+
+/* Read back the currently selected parent index from hardware. */
+static u8 mmp_clk_mix_get_parent(struct clk_hw *hw)
+{
+       struct mmp_clk_mix *mix = to_clk_mix(hw);
+       struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
+       unsigned long flags = 0;
+       u32 reg = 0;
+       u32 mux_val;
+
+       /* Snapshot the register holding the mux field under the lock. */
+       if (mix->lock)
+               spin_lock_irqsave(mix->lock, flags);
+
+       switch (mix->type) {
+       case MMP_CLK_MIX_TYPE_V1:
+       case MMP_CLK_MIX_TYPE_V2:
+               reg = readl(ri->reg_clk_ctrl);
+               break;
+       default:
+               reg = readl(ri->reg_clk_sel);
+               break;
+       }
+
+       if (mix->lock)
+               spin_unlock_irqrestore(mix->lock, flags);
+
+       mux_val = MMP_CLK_BITS_GET_VAL(reg, ri->width_mux, ri->shift_mux);
+
+       return _get_mux(mix, mux_val);
+}
+
+/*
+ * Compute the output rate from the parent rate and the divider field
+ * currently programmed in hardware.
+ */
+static unsigned long mmp_clk_mix_recalc_rate(struct clk_hw *hw,
+                                       unsigned long parent_rate)
+{
+       struct mmp_clk_mix *mix = to_clk_mix(hw);
+       struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
+       unsigned long flags = 0;
+       u32 mux_div = 0;
+       u8 width, shift;
+       unsigned int div;
+
+       if (mix->lock)
+               spin_lock_irqsave(mix->lock, flags);
+
+       /* V1/V2 keep the fields in the control register; V3 in the select reg. */
+       if (mix->type == MMP_CLK_MIX_TYPE_V1
+               || mix->type == MMP_CLK_MIX_TYPE_V2)
+               mux_div = readl(ri->reg_clk_ctrl);
+       else
+               mux_div = readl(ri->reg_clk_sel);
+
+       if (mix->lock)
+               spin_unlock_irqrestore(mix->lock, flags);
+
+       width = mix->reg_info.width_div;
+       shift = mix->reg_info.shift_div;
+
+       div = _get_div(mix, MMP_CLK_BITS_GET_VAL(mux_div, width, shift));
+
+       /*
+        * _get_div() returns 0 when the raw field value is not in the
+        * divider table; avoid the divide-by-zero that would follow.
+        */
+       if (!div)
+               return 0;
+
+       return parent_rate / div;
+}
+
+/*
+ * Reparent the mix clock.  With a rate table, the first valid entry for
+ * the requested parent supplies the divisor; without one, only the mux
+ * field is reprogrammed.
+ */
+static int mmp_clk_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct mmp_clk_mix *mix = to_clk_mix(hw);
+       struct mmp_clk_mix_clk_table *item = NULL;
+       u32 div_val, mux_val;
+       int i;
+
+       if (!mix->table) {
+               mux_val = _get_mux_val(mix, index);
+               return _set_rate(mix, mux_val, 0, 1, 0);
+       }
+
+       for (i = 0; i < mix->table_size; i++) {
+               if (mix->table[i].valid == 0)
+                       continue;
+               if (mix->table[i].parent_index == index) {
+                       item = &mix->table[i];
+                       break;
+               }
+       }
+       if (!item)
+               return -EINVAL;
+
+       div_val = _get_div_val(mix, item->divisor);
+       mux_val = _get_mux_val(mix, item->parent_index);
+
+       return _set_rate(mix, mux_val, div_val, 1, div_val ? 1 : 0);
+}
+
+/*
+ * Program the rate chosen by determine_rate(): locate the parent/divisor
+ * pair matching @best_parent_rate (via the rate table if present) and
+ * write both fields.  Returns -EINVAL when no match is found.
+ */
+static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+                               unsigned long best_parent_rate)
+{
+       struct mmp_clk_mix *mix = to_clk_mix(hw);
+       struct mmp_clk_mix_clk_table *item;
+       unsigned long parent_rate;
+       unsigned int best_divisor;
+       struct clk *mix_clk, *parent;
+       int i;
+
+       /* Guard the divisor computation against a zero target rate. */
+       if (!rate)
+               return -EINVAL;
+
+       best_divisor = best_parent_rate / rate;
+
+       mix_clk = hw->clk;
+       if (mix->table) {
+               for (i = 0; i < mix->table_size; i++) {
+                       item = &mix->table[i];
+                       if (item->valid == 0)
+                               continue;
+                       parent = clk_get_parent_by_index(mix_clk,
+                                                       item->parent_index);
+                       parent_rate = __clk_get_rate(parent);
+                       if (parent_rate == best_parent_rate
+                               && item->divisor == best_divisor)
+                               break;
+               }
+               if (i < mix->table_size)
+                       return _set_rate(mix,
+                                       _get_mux_val(mix, item->parent_index),
+                                       _get_div_val(mix, item->divisor),
+                                       1, 1);
+               else
+                       return -EINVAL;
+       } else {
+               for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
+                       parent = clk_get_parent_by_index(mix_clk, i);
+                       parent_rate = __clk_get_rate(parent);
+                       if (parent_rate == best_parent_rate)
+                               break;
+               }
+               if (i < __clk_get_num_parents(mix_clk))
+                       return _set_rate(mix, _get_mux_val(mix, i),
+                                       _get_div_val(mix, best_divisor), 1, 1);
+               else
+                       return -EINVAL;
+       }
+}
+
+/* clk_ops init hook: prune rate-table entries no parent can hit exactly. */
+static void mmp_clk_mix_init(struct clk_hw *hw)
+{
+       struct mmp_clk_mix *mix = to_clk_mix(hw);
+
+       if (!mix->table)
+               return;
+
+       _filter_clk_table(mix, mix->table, mix->table_size);
+}
+
+/*
+ * clk_ops for mix (combined mux + divider) clocks.  set_rate_and_parent
+ * lets the clk core change both fields in one hardware update, matching
+ * the "must be set at the same time" constraint noted above.
+ */
+const struct clk_ops mmp_clk_mix_ops = {
+       .determine_rate = mmp_clk_mix_determine_rate,
+       .set_rate_and_parent = mmp_clk_mix_set_rate_and_parent,
+       .set_rate = mmp_clk_set_rate,
+       .set_parent = mmp_clk_set_parent,
+       .get_parent = mmp_clk_mix_get_parent,
+       .recalc_rate = mmp_clk_mix_recalc_rate,
+       .init = mmp_clk_mix_init,
+};
+
+/*
+ * Allocate and register one mix (mux + divider) clock.  The reg_info,
+ * rate table and mux table from @config are deep-copied so the caller's
+ * storage may be temporary.  Returns the clk or ERR_PTR(-ENOMEM).
+ */
+struct clk *mmp_clk_register_mix(struct device *dev,
+                                       const char *name,
+                                       const char **parent_names,
+                                       u8 num_parents,
+                                       unsigned long flags,
+                                       struct mmp_clk_mix_config *config,
+                                       spinlock_t *lock)
+{
+       struct mmp_clk_mix *mix;
+       struct clk *clk;
+       struct clk_init_data init;
+       size_t table_bytes;
+
+       mix = kzalloc(sizeof(*mix), GFP_KERNEL);
+       if (!mix) {
+               pr_err("%s:%s: could not allocate mmp mix clk\n",
+                       __func__, name);
+               return ERR_PTR(-ENOMEM);
+       }
+
+       /* Simple fields first. */
+       mix->div_flags = config->div_flags;
+       mix->mux_flags = config->mux_flags;
+       mix->lock = lock;
+       memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info));
+
+       /* Deep-copy the optional tables. */
+       if (config->table) {
+               table_bytes = sizeof(*config->table) * config->table_size;
+               mix->table = kzalloc(table_bytes, GFP_KERNEL);
+               if (!mix->table) {
+                       pr_err("%s:%s: could not allocate mmp mix table\n",
+                               __func__, name);
+                       goto free_mix;
+               }
+               memcpy(mix->table, config->table, table_bytes);
+               mix->table_size = config->table_size;
+       }
+
+       if (config->mux_table) {
+               table_bytes = sizeof(u32) * num_parents;
+               mix->mux_table = kzalloc(table_bytes, GFP_KERNEL);
+               if (!mix->mux_table) {
+                       pr_err("%s:%s: could not allocate mmp mix mux-table\n",
+                               __func__, name);
+                       goto free_table;
+               }
+               memcpy(mix->mux_table, config->mux_table, table_bytes);
+       }
+
+       /* Classify the hardware variant from the register layout. */
+       if (config->reg_info.bit_fc >= 32)
+               mix->type = MMP_CLK_MIX_TYPE_V1;
+       else if (config->reg_info.reg_clk_sel)
+               mix->type = MMP_CLK_MIX_TYPE_V3;
+       else
+               mix->type = MMP_CLK_MIX_TYPE_V2;
+
+       init.name = name;
+       init.flags = flags | CLK_GET_RATE_NOCACHE;
+       init.parent_names = parent_names;
+       init.num_parents = num_parents;
+       init.ops = &mmp_clk_mix_ops;
+       mix->hw.init = &init;
+
+       clk = clk_register(dev, &mix->hw);
+       if (!IS_ERR(clk))
+               return clk;
+
+       kfree(mix->mux_table);
+free_table:
+       kfree(mix->table);
+free_mix:
+       kfree(mix);
+       return IS_ERR(clk) && clk ? clk : ERR_PTR(-ENOMEM);
+}
index b2721cae257adcca4a106cee4e9a9e443bf5d808..5c90a4230fa3d3f2718c7ed6452c32c51df70b24 100644 (file)
@@ -54,7 +54,7 @@
 
 static DEFINE_SPINLOCK(clk_lock);
 
-static struct clk_factor_masks uart_factor_masks = {
+static struct mmp_clk_factor_masks uart_factor_masks = {
        .factor = 2,
        .num_mask = 0x1fff,
        .den_mask = 0x1fff,
@@ -62,7 +62,7 @@ static struct clk_factor_masks uart_factor_masks = {
        .den_shift = 0,
 };
 
-static struct clk_factor_tbl uart_factor_tbl[] = {
+static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
        {.num = 14634, .den = 2165},    /*14.745MHZ */
        {.num = 3521, .den = 689},      /*19.23MHZ */
        {.num = 9679, .den = 5728},     /*58.9824MHZ */
@@ -191,7 +191,7 @@ void __init mmp2_clk_init(void)
        clk = mmp_clk_register_factor("uart_pll", "pll1_4", 0,
                                mpmu_base + MPMU_UART_PLL,
                                &uart_factor_masks, uart_factor_tbl,
-                               ARRAY_SIZE(uart_factor_tbl));
+                               ARRAY_SIZE(uart_factor_tbl), &clk_lock);
        clk_set_rate(clk, 14745600);
        clk_register_clkdev(clk, "uart_pll", NULL);
 
diff --git a/drivers/clk/mmp/clk-of-mmp2.c b/drivers/clk/mmp/clk-of-mmp2.c
new file mode 100644 (file)
index 0000000..2cbc2b4
--- /dev/null
@@ -0,0 +1,334 @@
+/*
+ * mmp2 clock framework source file
+ *
+ * Copyright (C) 2012 Marvell
+ * Chao Xie <xiechao.mail@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/marvell,mmp2.h>
+
+#include "clk.h"
+#include "reset.h"
+
+#define APBC_RTC       0x0
+#define APBC_TWSI0     0x4
+#define APBC_TWSI1     0x8
+#define APBC_TWSI2     0xc
+#define APBC_TWSI3     0x10
+#define APBC_TWSI4     0x7c
+#define APBC_TWSI5     0x80
+#define APBC_KPC       0x18
+#define APBC_UART0     0x2c
+#define APBC_UART1     0x30
+#define APBC_UART2     0x34
+#define APBC_UART3     0x88
+#define APBC_GPIO      0x38
+#define APBC_PWM0      0x3c
+#define APBC_PWM1      0x40
+#define APBC_PWM2      0x44
+#define APBC_PWM3      0x48
+#define APBC_SSP0      0x50
+#define APBC_SSP1      0x54
+#define APBC_SSP2      0x58
+#define APBC_SSP3      0x5c
+#define APMU_SDH0      0x54
+#define APMU_SDH1      0x58
+#define APMU_SDH2      0xe8
+#define APMU_SDH3      0xec
+#define APMU_USB       0x5c
+#define APMU_DISP0     0x4c
+#define APMU_DISP1     0x110
+#define APMU_CCIC0     0x50
+#define APMU_CCIC1     0xf4
+#define MPMU_UART_PLL  0x14
+
+/* Per-SoC clock state: the shared clk table plus the three register banks. */
+struct mmp2_clk_unit {
+       struct mmp_clk_unit unit;       /* common MMP clk-table bookkeeping */
+       void __iomem *mpmu_base;        /* Main PMU registers (reg index 0) */
+       void __iomem *apmu_base;        /* Application PMU registers (index 1) */
+       void __iomem *apbc_base;        /* APB clock registers (index 2) */
+};
+
+static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
+       {MMP2_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
+       {MMP2_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
+       {MMP2_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 800000000},
+       {MMP2_CLK_PLL2, "pll2", NULL, CLK_IS_ROOT, 960000000},
+       {MMP2_CLK_USB_PLL, "usb_pll", NULL, CLK_IS_ROOT, 480000000},
+};
+
+static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
+       {MMP2_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0},
+       {MMP2_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0},
+       {MMP2_CLK_PLL1_8, "pll1_8", "pll1_4", 1, 2, 0},
+       {MMP2_CLK_PLL1_16, "pll1_16", "pll1_8", 1, 2, 0},
+       {MMP2_CLK_PLL1_20, "pll1_20", "pll1_4", 1, 5, 0},
+       {MMP2_CLK_PLL1_3, "pll1_3", "pll1", 1, 3, 0},
+       {MMP2_CLK_PLL1_6, "pll1_6", "pll1_3", 1, 2, 0},
+       {MMP2_CLK_PLL1_12, "pll1_12", "pll1_6", 1, 2, 0},
+       {MMP2_CLK_PLL2_2, "pll2_2", "pll2", 1, 2, 0},
+       {MMP2_CLK_PLL2_4, "pll2_4", "pll2_2", 1, 2, 0},
+       {MMP2_CLK_PLL2_8, "pll2_8", "pll2_4", 1, 2, 0},
+       {MMP2_CLK_PLL2_16, "pll2_16", "pll2_8", 1, 2, 0},
+       {MMP2_CLK_PLL2_3, "pll2_3", "pll2", 1, 3, 0},
+       {MMP2_CLK_PLL2_6, "pll2_6", "pll2_3", 1, 2, 0},
+       {MMP2_CLK_PLL2_12, "pll2_12", "pll2_6", 1, 2, 0},
+       {MMP2_CLK_VCTCXO_2, "vctcxo_2", "vctcxo", 1, 2, 0},
+       {MMP2_CLK_VCTCXO_4, "vctcxo_4", "vctcxo_2", 1, 2, 0},
+};
+
+static struct mmp_clk_factor_masks uart_factor_masks = {
+       .factor = 2,
+       .num_mask = 0x1fff,
+       .den_mask = 0x1fff,
+       .num_shift = 16,
+       .den_shift = 0,
+};
+
+static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
+       {.num = 14634, .den = 2165},    /*14.745MHZ */
+       {.num = 3521, .den = 689},      /*19.23MHZ */
+       {.num = 9679, .den = 5728},     /*58.9824MHZ */
+       {.num = 15850, .den = 9451},    /*59.429MHZ */
+};
+
+static void mmp2_pll_init(struct mmp2_clk_unit *pxa_unit)
+{
+       struct clk *clk;
+       struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+       mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
+                                       ARRAY_SIZE(fixed_rate_clks));
+
+       mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
+                                       ARRAY_SIZE(fixed_factor_clks));
+
+       clk = mmp_clk_register_factor("uart_pll", "pll1_4",
+                               CLK_SET_RATE_PARENT,
+                               pxa_unit->mpmu_base + MPMU_UART_PLL,
+                               &uart_factor_masks, uart_factor_tbl,
+                               ARRAY_SIZE(uart_factor_tbl), NULL);
+       mmp_clk_add(unit, MMP2_CLK_UART_PLL, clk);
+}
+
+static DEFINE_SPINLOCK(uart0_lock);
+static DEFINE_SPINLOCK(uart1_lock);
+static DEFINE_SPINLOCK(uart2_lock);
+static const char *uart_parent_names[] = {"uart_pll", "vctcxo"};
+
+static DEFINE_SPINLOCK(ssp0_lock);
+static DEFINE_SPINLOCK(ssp1_lock);
+static DEFINE_SPINLOCK(ssp2_lock);
+static DEFINE_SPINLOCK(ssp3_lock);
+static const char *ssp_parent_names[] = {"vctcxo_4", "vctcxo_2", "vctcxo", "pll1_16"};
+
+static DEFINE_SPINLOCK(reset_lock);
+
+static struct mmp_param_mux_clk apbc_mux_clks[] = {
+       {0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
+       {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
+       {0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART2, 4, 3, 0, &uart2_lock},
+       {0, "uart3_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART3, 4, 3, 0, &uart2_lock},
+       {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP0, 4, 3, 0, &ssp0_lock},
+       {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock},
+       {0, "ssp2_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP2, 4, 3, 0, &ssp2_lock},
+       {0, "ssp3_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP3, 4, 3, 0, &ssp3_lock},
+};
+
+/*
+ * APB gate clocks.  Fields: id, name, parent, flags, register offset,
+ * mask / enable value / disable value, gate flags, spinlock.  The TWSI,
+ * GPIO, KPC and RTC gates share reset_lock because their registers also
+ * back the reset controller built in mmp2_clk_reset_init().
+ */
+static struct mmp_param_gate_clk apbc_gate_clks[] = {
+       {MMP2_CLK_TWSI0, "twsi0_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x7, 0x3, 0x0, 0, &reset_lock},
+       {MMP2_CLK_TWSI1, "twsi1_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI1, 0x7, 0x3, 0x0, 0, &reset_lock},
+       {MMP2_CLK_TWSI2, "twsi2_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI2, 0x7, 0x3, 0x0, 0, &reset_lock},
+       {MMP2_CLK_TWSI3, "twsi3_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI3, 0x7, 0x3, 0x0, 0, &reset_lock},
+       {MMP2_CLK_TWSI4, "twsi4_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI4, 0x7, 0x3, 0x0, 0, &reset_lock},
+       {MMP2_CLK_TWSI5, "twsi5_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_TWSI5, 0x7, 0x3, 0x0, 0, &reset_lock},
+       {MMP2_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x7, 0x3, 0x0, 0, &reset_lock},
+       {MMP2_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x7, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
+       {MMP2_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x87, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, &reset_lock},
+       /*
+        * NOTE(review): parent "pll1_48" is not among the fixed-factor
+        * clocks registered in this file — confirm it is provided
+        * elsewhere, otherwise the pwm clocks will be orphaned.
+        */
+       {MMP2_CLK_PWM0, "pwm0_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM0, 0x7, 0x3, 0x0, 0, &reset_lock},
+       {MMP2_CLK_PWM1, "pwm1_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM1, 0x7, 0x3, 0x0, 0, &reset_lock},
+       {MMP2_CLK_PWM2, "pwm2_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM2, 0x7, 0x3, 0x0, 0, &reset_lock},
+       {MMP2_CLK_PWM3, "pwm3_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM3, 0x7, 0x3, 0x0, 0, &reset_lock},
+       /* The gate clocks has mux parent. */
+       {MMP2_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x7, 0x3, 0x0, 0, &uart0_lock},
+       {MMP2_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x7, 0x3, 0x0, 0, &uart1_lock},
+       {MMP2_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBC_UART2, 0x7, 0x3, 0x0, 0, &uart2_lock},
+       /* uart3 shares uart2's lock (no dedicated uart3 lock is defined). */
+       {MMP2_CLK_UART3, "uart3_clk", "uart3_mux", CLK_SET_RATE_PARENT, APBC_UART3, 0x7, 0x3, 0x0, 0, &uart2_lock},
+       {MMP2_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, APBC_SSP0, 0x7, 0x3, 0x0, 0, &ssp0_lock},
+       {MMP2_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x7, 0x3, 0x0, 0, &ssp1_lock},
+       {MMP2_CLK_SSP2, "ssp2_clk", "ssp2_mux", CLK_SET_RATE_PARENT, APBC_SSP2, 0x7, 0x3, 0x0, 0, &ssp2_lock},
+       {MMP2_CLK_SSP3, "ssp3_clk", "ssp3_mux", CLK_SET_RATE_PARENT, APBC_SSP3, 0x7, 0x3, 0x0, 0, &ssp3_lock},
+};
+
+static void mmp2_apb_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
+{
+       struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+       mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->apbc_base,
+                               ARRAY_SIZE(apbc_mux_clks));
+
+       mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
+                               ARRAY_SIZE(apbc_gate_clks));
+}
+
+static DEFINE_SPINLOCK(sdh_lock);
+static const char *sdh_parent_names[] = {"pll1_4", "pll2", "usb_pll", "pll1"};
+static struct mmp_clk_mix_config sdh_mix_config = {
+       .reg_info = DEFINE_MIX_REG_INFO(4, 10, 2, 8, 32),
+};
+
+static DEFINE_SPINLOCK(usb_lock);
+
+static DEFINE_SPINLOCK(disp0_lock);
+static DEFINE_SPINLOCK(disp1_lock);
+static const char *disp_parent_names[] = {"pll1", "pll1_16", "pll2", "vctcxo"};
+
+static DEFINE_SPINLOCK(ccic0_lock);
+static DEFINE_SPINLOCK(ccic1_lock);
+static const char *ccic_parent_names[] = {"pll1_2", "pll1_16", "vctcxo"};
+static struct mmp_clk_mix_config ccic0_mix_config = {
+       .reg_info = DEFINE_MIX_REG_INFO(4, 17, 2, 6, 32),
+};
+static struct mmp_clk_mix_config ccic1_mix_config = {
+       .reg_info = DEFINE_MIX_REG_INFO(4, 16, 2, 6, 32),
+};
+
+static struct mmp_param_mux_clk apmu_mux_clks[] = {
+       {MMP2_CLK_DISP0_MUX, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 2, 0, &disp0_lock},
+       {MMP2_CLK_DISP1_MUX, "disp1_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP1, 6, 2, 0, &disp1_lock},
+};
+
+static struct mmp_param_div_clk apmu_div_clks[] = {
+       {0, "disp0_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 8, 4, 0, &disp0_lock},
+       {0, "disp0_sphy_div", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 15, 5, 0, &disp0_lock},
+       {0, "disp1_div", "disp1_mux", CLK_SET_RATE_PARENT, APMU_DISP1, 8, 4, 0, &disp1_lock},
+       {0, "ccic0_sphy_div", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 10, 5, 0, &ccic0_lock},
+       {0, "ccic1_sphy_div", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 10, 5, 0, &ccic1_lock},
+};
+
+/* AXI/peripheral gate clocks living in the APMU register bank. */
+static struct mmp_param_gate_clk apmu_gate_clks[] = {
+       {MMP2_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
+       /* These gate clocks have mux parents. */
+       {MMP2_CLK_SDH0, "sdh0_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
+       {MMP2_CLK_SDH1, "sdh1_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
+       /*
+        * sdh2/sdh3 previously reused MMP2_CLK_SDH1, which clobbered
+        * sdh1's slot in the clock table; give them their own ids.
+        */
+       {MMP2_CLK_SDH2, "sdh2_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH2, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
+       {MMP2_CLK_SDH3, "sdh3_clk", "sdh_mix_clk", CLK_SET_RATE_PARENT, APMU_SDH3, 0x1b, 0x1b, 0x0, 0, &sdh_lock},
+       {MMP2_CLK_DISP0, "disp0_clk", "disp0_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
+       {MMP2_CLK_DISP0_SPHY, "disp0_sphy_clk", "disp0_sphy_div", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1024, 0x1024, 0x0, 0, &disp0_lock},
+       {MMP2_CLK_DISP1, "disp1_clk", "disp1_div", CLK_SET_RATE_PARENT, APMU_DISP1, 0x1b, 0x1b, 0x0, 0, &disp1_lock},
+       {MMP2_CLK_CCIC_ARBITER, "ccic_arbiter", "vctcxo", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1800, 0x1800, 0x0, 0, &ccic0_lock},
+       {MMP2_CLK_CCIC0, "ccic0_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
+       {MMP2_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
+       {MMP2_CLK_CCIC0_SPHY, "ccic0_sphy_clk", "ccic0_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x300, 0x300, 0x0, 0, &ccic0_lock},
+       {MMP2_CLK_CCIC1, "ccic1_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x1b, 0x1b, 0x0, 0, &ccic1_lock},
+       {MMP2_CLK_CCIC1_PHY, "ccic1_phy_clk", "ccic1_mix_clk", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x24, 0x24, 0x0, 0, &ccic1_lock},
+       {MMP2_CLK_CCIC1_SPHY, "ccic1_sphy_clk", "ccic1_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC1, 0x300, 0x300, 0x0, 0, &ccic1_lock},
+};
+
+/* Register the APMU-bank mix, mux, div and gate clocks. */
+static void mmp2_axi_periph_clk_init(struct mmp2_clk_unit *pxa_unit)
+{
+       struct clk *clk;
+       struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+       /* reg_clk_ctrl can only be filled in now that apmu_base is mapped. */
+       sdh_mix_config.reg_info.reg_clk_ctrl = pxa_unit->apmu_base + APMU_SDH0;
+       /*
+        * NOTE(review): the sdh mix clk is looked up by its name
+        * "sdh_mix_clk" from the gate table; unlike the ccic mix clocks
+        * below, its handle is never added to the unit's clock table and
+        * the assignment to 'clk' is immediately overwritten — confirm no
+        * dt-bindings id is expected for it.
+        */
+       clk = mmp_clk_register_mix(NULL, "sdh_mix_clk", sdh_parent_names,
+                                       ARRAY_SIZE(sdh_parent_names),
+                                       CLK_SET_RATE_PARENT,
+                                       &sdh_mix_config, &sdh_lock);
+
+       ccic0_mix_config.reg_info.reg_clk_ctrl = pxa_unit->apmu_base + APMU_CCIC0;
+       clk = mmp_clk_register_mix(NULL, "ccic0_mix_clk", ccic_parent_names,
+                                       ARRAY_SIZE(ccic_parent_names),
+                                       CLK_SET_RATE_PARENT,
+                                       &ccic0_mix_config, &ccic0_lock);
+       mmp_clk_add(unit, MMP2_CLK_CCIC0_MIX, clk);
+
+       ccic1_mix_config.reg_info.reg_clk_ctrl = pxa_unit->apmu_base + APMU_CCIC1;
+       clk = mmp_clk_register_mix(NULL, "ccic1_mix_clk", ccic_parent_names,
+                                       ARRAY_SIZE(ccic_parent_names),
+                                       CLK_SET_RATE_PARENT,
+                                       &ccic1_mix_config, &ccic1_lock);
+       mmp_clk_add(unit, MMP2_CLK_CCIC1_MIX, clk);
+
+       mmp_register_mux_clks(unit, apmu_mux_clks, pxa_unit->apmu_base,
+                               ARRAY_SIZE(apmu_mux_clks));
+
+       mmp_register_div_clks(unit, apmu_div_clks, pxa_unit->apmu_base,
+                               ARRAY_SIZE(apmu_div_clks));
+
+       mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
+                               ARRAY_SIZE(apmu_gate_clks));
+}
+
+static void mmp2_clk_reset_init(struct device_node *np,
+                               struct mmp2_clk_unit *pxa_unit)
+{
+       struct mmp_clk_reset_cell *cells;
+       int i, nr_resets;
+
+       nr_resets = ARRAY_SIZE(apbc_gate_clks);
+       cells = kcalloc(nr_resets, sizeof(*cells), GFP_KERNEL);
+       if (!cells)
+               return;
+
+       for (i = 0; i < nr_resets; i++) {
+               cells[i].clk_id = apbc_gate_clks[i].id;
+               cells[i].reg = pxa_unit->apbc_base + apbc_gate_clks[i].offset;
+               cells[i].flags = 0;
+               cells[i].lock = apbc_gate_clks[i].lock;
+               cells[i].bits = 0x4;
+       }
+
+       mmp_clk_reset_register(np, cells, nr_resets);
+}
+
+/*
+ * Entry point for the "marvell,mmp2-clock" node: map the three register
+ * banks (mpmu/apmu/apbc), then register every clock and the reset cells.
+ *
+ * Fixes: the apmu mapping was checked via pxa_unit->mpmu_base (copy-paste
+ * bug), so a failed apmu map went undetected; failure paths also leaked
+ * the allocation and earlier mappings.
+ */
+static void __init mmp2_clk_init(struct device_node *np)
+{
+       struct mmp2_clk_unit *pxa_unit;
+
+       pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
+       if (!pxa_unit)
+               return;
+
+       pxa_unit->mpmu_base = of_iomap(np, 0);
+       if (!pxa_unit->mpmu_base) {
+               pr_err("failed to map mpmu registers\n");
+               goto free_memory;
+       }
+
+       pxa_unit->apmu_base = of_iomap(np, 1);
+       if (!pxa_unit->apmu_base) {
+               pr_err("failed to map apmu registers\n");
+               goto unmap_mpmu_region;
+       }
+
+       pxa_unit->apbc_base = of_iomap(np, 2);
+       if (!pxa_unit->apbc_base) {
+               pr_err("failed to map apbc registers\n");
+               goto unmap_apmu_region;
+       }
+
+       mmp_clk_init(np, &pxa_unit->unit, MMP2_NR_CLKS);
+
+       mmp2_pll_init(pxa_unit);
+
+       mmp2_apb_periph_clk_init(pxa_unit);
+
+       mmp2_axi_periph_clk_init(pxa_unit);
+
+       mmp2_clk_reset_init(np, pxa_unit);
+
+       return;
+
+unmap_apmu_region:
+       iounmap(pxa_unit->apmu_base);
+unmap_mpmu_region:
+       iounmap(pxa_unit->mpmu_base);
+free_memory:
+       kfree(pxa_unit);
+}
+
+CLK_OF_DECLARE(mmp2_clk, "marvell,mmp2-clock", mmp2_clk_init);
diff --git a/drivers/clk/mmp/clk-of-pxa168.c b/drivers/clk/mmp/clk-of-pxa168.c
new file mode 100644 (file)
index 0000000..5b1810d
--- /dev/null
@@ -0,0 +1,279 @@
+/*
+ * pxa168 clock framework source file
+ *
+ * Copyright (C) 2012 Marvell
+ * Chao Xie <xiechao.mail@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/marvell,pxa168.h>
+
+#include "clk.h"
+#include "reset.h"
+
+#define APBC_RTC       0x28
+#define APBC_TWSI0     0x2c
+#define APBC_KPC       0x30
+#define APBC_UART0     0x0
+#define APBC_UART1     0x4
+#define APBC_GPIO      0x8
+#define APBC_PWM0      0xc
+#define APBC_PWM1      0x10
+#define APBC_PWM2      0x14
+#define APBC_PWM3      0x18
+#define APBC_SSP0      0x81c
+#define APBC_SSP1      0x820
+#define APBC_SSP2      0x84c
+#define APBC_SSP3      0x858
+#define APBC_SSP4      0x85c
+#define APBC_TWSI1     0x6c
+#define APBC_UART2     0x70
+#define APMU_SDH0      0x54
+#define APMU_SDH1      0x58
+#define APMU_USB       0x5c
+#define APMU_DISP0     0x4c
+#define APMU_CCIC0     0x50
+#define APMU_DFC       0x60
+#define MPMU_UART_PLL  0x14
+
+struct pxa168_clk_unit {
+       struct mmp_clk_unit unit;
+       void __iomem *mpmu_base;
+       void __iomem *apmu_base;
+       void __iomem *apbc_base;
+};
+
+static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
+       {PXA168_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
+       {PXA168_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
+       {PXA168_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 624000000},
+};
+
+static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
+       {PXA168_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0},
+       {PXA168_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0},
+       {PXA168_CLK_PLL1_8, "pll1_8", "pll1_4", 1, 2, 0},
+       {PXA168_CLK_PLL1_16, "pll1_16", "pll1_8", 1, 2, 0},
+       {PXA168_CLK_PLL1_6, "pll1_6", "pll1_2", 1, 3, 0},
+       {PXA168_CLK_PLL1_12, "pll1_12", "pll1_6", 1, 2, 0},
+       {PXA168_CLK_PLL1_24, "pll1_24", "pll1_12", 1, 2, 0},
+       {PXA168_CLK_PLL1_48, "pll1_48", "pll1_24", 1, 2, 0},
+       {PXA168_CLK_PLL1_96, "pll1_96", "pll1_48", 1, 2, 0},
+       {PXA168_CLK_PLL1_13, "pll1_13", "pll1", 1, 13, 0},
+       {PXA168_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 2, 3, 0},
+       {PXA168_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 2, 3, 0},
+       {PXA168_CLK_PLL1_3_16, "pll1_3_16", "pll1", 3, 16, 0},
+};
+
+static struct mmp_clk_factor_masks uart_factor_masks = {
+       .factor = 2,
+       .num_mask = 0x1fff,
+       .den_mask = 0x1fff,
+       .num_shift = 16,
+       .den_shift = 0,
+};
+
+static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
+       {.num = 8125, .den = 1536},     /*14.745MHZ */
+};
+
+static void pxa168_pll_init(struct pxa168_clk_unit *pxa_unit)
+{
+       struct clk *clk;
+       struct mmp_clk_unit *unit = &pxa_unit->unit;    /* register roots, fixed factors, then UART PLL */
+
+       mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
+                                       ARRAY_SIZE(fixed_rate_clks));
+
+       mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
+                                       ARRAY_SIZE(fixed_factor_clks));
+
+       clk = mmp_clk_register_factor("uart_pll", "pll1_4",
+                               CLK_SET_RATE_PARENT,
+                               pxa_unit->mpmu_base + MPMU_UART_PLL,
+                               &uart_factor_masks, uart_factor_tbl,
+                               ARRAY_SIZE(uart_factor_tbl), NULL);     /* fractional divider in MPMU */
+       mmp_clk_add(unit, PXA168_CLK_UART_PLL, clk);
+}
+
+static DEFINE_SPINLOCK(uart0_lock);
+static DEFINE_SPINLOCK(uart1_lock);
+static DEFINE_SPINLOCK(uart2_lock);
+static const char *uart_parent_names[] = {"pll1_3_16", "uart_pll"};
+
+static DEFINE_SPINLOCK(ssp0_lock);
+static DEFINE_SPINLOCK(ssp1_lock);
+static DEFINE_SPINLOCK(ssp2_lock);
+static DEFINE_SPINLOCK(ssp3_lock);
+static DEFINE_SPINLOCK(ssp4_lock);
+static const char *ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"};
+
+static DEFINE_SPINLOCK(reset_lock);
+
+static struct mmp_param_mux_clk apbc_mux_clks[] = {
+       {0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
+       {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
+       {0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART2, 4, 3, 0, &uart2_lock},
+       {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP0, 4, 3, 0, &ssp0_lock},
+       {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock},
+       {0, "ssp2_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP2, 4, 3, 0, &ssp2_lock},
+       {0, "ssp3_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP3, 4, 3, 0, &ssp3_lock},
+       {0, "ssp4_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP4, 4, 3, 0, &ssp4_lock},
+};
+
+static struct mmp_param_gate_clk apbc_gate_clks[] = {
+       {PXA168_CLK_TWSI0, "twsi0_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA168_CLK_TWSI1, "twsi1_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI1, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA168_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA168_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x3, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
+       {PXA168_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x83, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
+       {PXA168_CLK_PWM0, "pwm0_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM0, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA168_CLK_PWM1, "pwm1_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM1, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA168_CLK_PWM2, "pwm2_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM2, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA168_CLK_PWM3, "pwm3_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM3, 0x3, 0x3, 0x0, 0, &reset_lock},
+       /* The gate clocks has mux parent. */
+       {PXA168_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x3, 0x3, 0x0, 0, &uart0_lock},
+       {PXA168_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x3, 0x3, 0x0, 0, &uart1_lock},
+       {PXA168_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBC_UART2, 0x3, 0x3, 0x0, 0, &uart2_lock},
+       {PXA168_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, APBC_SSP0, 0x3, 0x3, 0x0, 0, &ssp0_lock},
+       {PXA168_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x3, 0x3, 0x0, 0, &ssp1_lock},
+       {PXA168_CLK_SSP2, "ssp2_clk", "ssp2_mux", CLK_SET_RATE_PARENT, APBC_SSP2, 0x3, 0x3, 0x0, 0, &ssp2_lock},
+       {PXA168_CLK_SSP3, "ssp3_clk", "ssp3_mux", CLK_SET_RATE_PARENT, APBC_SSP3, 0x3, 0x3, 0x0, 0, &ssp3_lock},
+       {PXA168_CLK_SSP4, "ssp4_clk", "ssp4_mux", CLK_SET_RATE_PARENT, APBC_SSP4, 0x3, 0x3, 0x0, 0, &ssp4_lock},
+};
+
+static void pxa168_apb_periph_clk_init(struct pxa168_clk_unit *pxa_unit)
+{
+       struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+       mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->apbc_base,
+                               ARRAY_SIZE(apbc_mux_clks));     /* muxes first: gates below name them as parents */
+
+       mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
+                               ARRAY_SIZE(apbc_gate_clks));
+
+}
+
+static DEFINE_SPINLOCK(sdh0_lock);
+static DEFINE_SPINLOCK(sdh1_lock);
+static const char *sdh_parent_names[] = {"pll1_12", "pll1_13"};
+
+static DEFINE_SPINLOCK(usb_lock);
+
+static DEFINE_SPINLOCK(disp0_lock);
+static const char *disp_parent_names[] = {"pll1_2", "pll1_12"};
+
+static DEFINE_SPINLOCK(ccic0_lock);
+static const char *ccic_parent_names[] = {"pll1_2", "pll1_12"};
+static const char *ccic_phy_parent_names[] = {"pll1_6", "pll1_12"};
+
+static struct mmp_param_mux_clk apmu_mux_clks[] = {
+       {0, "sdh0_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH0, 6, 1, 0, &sdh0_lock},
+       {0, "sdh1_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH1, 6, 1, 0, &sdh1_lock},
+       {0, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 1, 0, &disp0_lock},
+       {0, "ccic0_mux", ccic_parent_names, ARRAY_SIZE(ccic_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 6, 1, 0, &ccic0_lock},
+       {0, "ccic0_phy_mux", ccic_phy_parent_names, ARRAY_SIZE(ccic_phy_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 7, 1, 0, &ccic0_lock},
+};
+
+static struct mmp_param_div_clk apmu_div_clks[] = {
+       {0, "ccic0_sphy_div", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 10, 5, 0, &ccic0_lock},
+};
+
+static struct mmp_param_gate_clk apmu_gate_clks[] = {
+       {PXA168_CLK_DFC, "dfc_clk", "pll1_4", CLK_SET_RATE_PARENT, APMU_DFC, 0x19b, 0x19b, 0x0, 0, NULL},
+       {PXA168_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
+       {PXA168_CLK_SPH, "sph_clk", "usb_pll", 0, APMU_USB, 0x12, 0x12, 0x0, 0, &usb_lock},
+       /* The gate clocks has mux parent. */
+       {PXA168_CLK_SDH0, "sdh0_clk", "sdh0_mux", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh0_lock},
+       {PXA168_CLK_SDH1, "sdh1_clk", "sdh1_mux", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh1_lock},
+       {PXA168_CLK_DISP0, "disp0_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
+       {PXA168_CLK_CCIC0, "ccic0_clk", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
+       {PXA168_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_phy_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
+       {PXA168_CLK_CCIC0_SPHY, "ccic0_sphy_clk", "ccic0_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x300, 0x300, 0x0, 0, &ccic0_lock},
+};
+
+static void pxa168_axi_periph_clk_init(struct pxa168_clk_unit *pxa_unit)
+{
+       struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+       mmp_register_mux_clks(unit, apmu_mux_clks, pxa_unit->apmu_base,
+                               ARRAY_SIZE(apmu_mux_clks));     /* muxes, then divs, then gates: parent order */
+
+       mmp_register_div_clks(unit, apmu_div_clks, pxa_unit->apmu_base,
+                               ARRAY_SIZE(apmu_div_clks));
+
+       mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
+                               ARRAY_SIZE(apmu_gate_clks));
+}
+
+static void pxa168_clk_reset_init(struct device_node *np,
+                               struct pxa168_clk_unit *pxa_unit)
+{
+       struct mmp_clk_reset_cell *cells;       /* one reset cell per APBC gate clock */
+       int i, nr_resets;
+
+       nr_resets = ARRAY_SIZE(apbc_gate_clks);
+       cells = kcalloc(nr_resets, sizeof(*cells), GFP_KERNEL);
+       if (!cells)
+               return;         /* on OOM no reset controller is registered; clocks still work */
+
+       for (i = 0; i < nr_resets; i++) {
+               cells[i].clk_id = apbc_gate_clks[i].id;
+               cells[i].reg = pxa_unit->apbc_base + apbc_gate_clks[i].offset;
+               cells[i].flags = 0;
+               cells[i].lock = apbc_gate_clks[i].lock; /* share the gate's spinlock: same register */
+               cells[i].bits = 0x4;    /* presumably the APBC reset bit -- TODO confirm vs. datasheet */
+       }
+
+       mmp_clk_reset_register(np, cells, nr_resets);
+}
+
+static void __init pxa168_clk_init(struct device_node *np)
+{
+       struct pxa168_clk_unit *pxa_unit;
+
+       pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
+       if (!pxa_unit)
+               return;
+
+       pxa_unit->mpmu_base = of_iomap(np, 0);
+       if (!pxa_unit->mpmu_base) {
+               pr_err("failed to map mpmu registers\n");
+               return;         /* boot-time init: leak of pxa_unit on failure is accepted */
+       }
+
+       pxa_unit->apmu_base = of_iomap(np, 1);
+       if (!pxa_unit->apmu_base) {     /* fix: was re-testing mpmu_base (copy-paste) */
+               pr_err("failed to map apmu registers\n");
+               return;
+       }
+
+       pxa_unit->apbc_base = of_iomap(np, 2);
+       if (!pxa_unit->apbc_base) {
+               pr_err("failed to map apbc registers\n");
+               return;
+       }
+
+       mmp_clk_init(np, &pxa_unit->unit, PXA168_NR_CLKS);
+
+       pxa168_pll_init(pxa_unit);
+
+       pxa168_apb_periph_clk_init(pxa_unit);
+
+       pxa168_axi_periph_clk_init(pxa_unit);
+
+       pxa168_clk_reset_init(np, pxa_unit);
+}
+
+CLK_OF_DECLARE(pxa168_clk, "marvell,pxa168-clock", pxa168_clk_init);
diff --git a/drivers/clk/mmp/clk-of-pxa910.c b/drivers/clk/mmp/clk-of-pxa910.c
new file mode 100644 (file)
index 0000000..5e3c80d
--- /dev/null
@@ -0,0 +1,301 @@
+/*
+ * pxa910 clock framework source file
+ *
+ * Copyright (C) 2012 Marvell
+ * Chao Xie <xiechao.mail@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/of_address.h>
+
+#include <dt-bindings/clock/marvell,pxa910.h>
+
+#include "clk.h"
+#include "reset.h"
+
+#define APBC_RTC       0x28
+#define APBC_TWSI0     0x2c
+#define APBC_KPC       0x18
+#define APBC_UART0     0x0
+#define APBC_UART1     0x4
+#define APBC_GPIO      0x8
+#define APBC_PWM0      0xc
+#define APBC_PWM1      0x10
+#define APBC_PWM2      0x14
+#define APBC_PWM3      0x18
+#define APBC_SSP0      0x1c
+#define APBC_SSP1      0x20
+#define APBC_SSP2      0x4c
+#define APBCP_TWSI1    0x28
+#define APBCP_UART2    0x1c
+#define APMU_SDH0      0x54
+#define APMU_SDH1      0x58
+#define APMU_USB       0x5c
+#define APMU_DISP0     0x4c
+#define APMU_CCIC0     0x50
+#define APMU_DFC       0x60
+#define MPMU_UART_PLL  0x14
+
+struct pxa910_clk_unit {
+       struct mmp_clk_unit unit;
+       void __iomem *mpmu_base;
+       void __iomem *apmu_base;
+       void __iomem *apbc_base;
+       void __iomem *apbcp_base;
+};
+
+static struct mmp_param_fixed_rate_clk fixed_rate_clks[] = {
+       {PXA910_CLK_CLK32, "clk32", NULL, CLK_IS_ROOT, 32768},
+       {PXA910_CLK_VCTCXO, "vctcxo", NULL, CLK_IS_ROOT, 26000000},
+       {PXA910_CLK_PLL1, "pll1", NULL, CLK_IS_ROOT, 624000000},
+};
+
+static struct mmp_param_fixed_factor_clk fixed_factor_clks[] = {
+       {PXA910_CLK_PLL1_2, "pll1_2", "pll1", 1, 2, 0},
+       {PXA910_CLK_PLL1_4, "pll1_4", "pll1_2", 1, 2, 0},
+       {PXA910_CLK_PLL1_8, "pll1_8", "pll1_4", 1, 2, 0},
+       {PXA910_CLK_PLL1_16, "pll1_16", "pll1_8", 1, 2, 0},
+       {PXA910_CLK_PLL1_6, "pll1_6", "pll1_2", 1, 3, 0},
+       {PXA910_CLK_PLL1_12, "pll1_12", "pll1_6", 1, 2, 0},
+       {PXA910_CLK_PLL1_24, "pll1_24", "pll1_12", 1, 2, 0},
+       {PXA910_CLK_PLL1_48, "pll1_48", "pll1_24", 1, 2, 0},
+       {PXA910_CLK_PLL1_96, "pll1_96", "pll1_48", 1, 2, 0},
+       {PXA910_CLK_PLL1_13, "pll1_13", "pll1", 1, 13, 0},
+       {PXA910_CLK_PLL1_13_1_5, "pll1_13_1_5", "pll1_13", 2, 3, 0},
+       {PXA910_CLK_PLL1_2_1_5, "pll1_2_1_5", "pll1_2", 2, 3, 0},
+       {PXA910_CLK_PLL1_3_16, "pll1_3_16", "pll1", 3, 16, 0},
+};
+
+static struct mmp_clk_factor_masks uart_factor_masks = {
+       .factor = 2,
+       .num_mask = 0x1fff,
+       .den_mask = 0x1fff,
+       .num_shift = 16,
+       .den_shift = 0,
+};
+
+static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
+       {.num = 8125, .den = 1536},     /*14.745MHZ */
+};
+
+static void pxa910_pll_init(struct pxa910_clk_unit *pxa_unit)
+{
+       struct clk *clk;
+       struct mmp_clk_unit *unit = &pxa_unit->unit;    /* register roots, fixed factors, then UART PLL */
+
+       mmp_register_fixed_rate_clks(unit, fixed_rate_clks,
+                                       ARRAY_SIZE(fixed_rate_clks));
+
+       mmp_register_fixed_factor_clks(unit, fixed_factor_clks,
+                                       ARRAY_SIZE(fixed_factor_clks));
+
+       clk = mmp_clk_register_factor("uart_pll", "pll1_4",
+                               CLK_SET_RATE_PARENT,
+                               pxa_unit->mpmu_base + MPMU_UART_PLL,
+                               &uart_factor_masks, uart_factor_tbl,
+                               ARRAY_SIZE(uart_factor_tbl), NULL);     /* fractional divider in MPMU */
+       mmp_clk_add(unit, PXA910_CLK_UART_PLL, clk);
+}
+
+static DEFINE_SPINLOCK(uart0_lock);
+static DEFINE_SPINLOCK(uart1_lock);
+static DEFINE_SPINLOCK(uart2_lock);
+static const char *uart_parent_names[] = {"pll1_3_16", "uart_pll"};
+
+static DEFINE_SPINLOCK(ssp0_lock);
+static DEFINE_SPINLOCK(ssp1_lock);
+static const char *ssp_parent_names[] = {"pll1_96", "pll1_48", "pll1_24", "pll1_12"};
+
+static DEFINE_SPINLOCK(reset_lock);
+
+static struct mmp_param_mux_clk apbc_mux_clks[] = {
+       {0, "uart0_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART0, 4, 3, 0, &uart0_lock},
+       {0, "uart1_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBC_UART1, 4, 3, 0, &uart1_lock},
+       {0, "ssp0_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP0, 4, 3, 0, &ssp0_lock},
+       {0, "ssp1_mux", ssp_parent_names, ARRAY_SIZE(ssp_parent_names), CLK_SET_RATE_PARENT, APBC_SSP1, 4, 3, 0, &ssp1_lock},
+};
+
+static struct mmp_param_mux_clk apbcp_mux_clks[] = {
+       {0, "uart2_mux", uart_parent_names, ARRAY_SIZE(uart_parent_names), CLK_SET_RATE_PARENT, APBCP_UART2, 4, 3, 0, &uart2_lock},
+};
+
+static struct mmp_param_gate_clk apbc_gate_clks[] = {
+       {PXA910_CLK_TWSI0, "twsi0_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBC_TWSI0, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA910_CLK_GPIO, "gpio_clk", "vctcxo", CLK_SET_RATE_PARENT, APBC_GPIO, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA910_CLK_KPC, "kpc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_KPC, 0x3, 0x3, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
+       {PXA910_CLK_RTC, "rtc_clk", "clk32", CLK_SET_RATE_PARENT, APBC_RTC, 0x83, 0x83, 0x0, MMP_CLK_GATE_NEED_DELAY, NULL},
+       {PXA910_CLK_PWM0, "pwm0_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM0, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA910_CLK_PWM1, "pwm1_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM1, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA910_CLK_PWM2, "pwm2_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM2, 0x3, 0x3, 0x0, 0, &reset_lock},
+       {PXA910_CLK_PWM3, "pwm3_clk", "pll1_48", CLK_SET_RATE_PARENT, APBC_PWM3, 0x3, 0x3, 0x0, 0, &reset_lock},
+       /* The gate clocks has mux parent. */
+       {PXA910_CLK_UART0, "uart0_clk", "uart0_mux", CLK_SET_RATE_PARENT, APBC_UART0, 0x3, 0x3, 0x0, 0, &uart0_lock},
+       {PXA910_CLK_UART1, "uart1_clk", "uart1_mux", CLK_SET_RATE_PARENT, APBC_UART1, 0x3, 0x3, 0x0, 0, &uart1_lock},
+       {PXA910_CLK_SSP0, "ssp0_clk", "ssp0_mux", CLK_SET_RATE_PARENT, APBC_SSP0, 0x3, 0x3, 0x0, 0, &ssp0_lock},
+       {PXA910_CLK_SSP1, "ssp1_clk", "ssp1_mux", CLK_SET_RATE_PARENT, APBC_SSP1, 0x3, 0x3, 0x0, 0, &ssp1_lock},
+};
+
+static struct mmp_param_gate_clk apbcp_gate_clks[] = {
+       {PXA910_CLK_TWSI1, "twsi1_clk", "pll1_13_1_5", CLK_SET_RATE_PARENT, APBCP_TWSI1, 0x3, 0x3, 0x0, 0, &reset_lock},
+       /* The gate clocks has mux parent. */
+       {PXA910_CLK_UART2, "uart2_clk", "uart2_mux", CLK_SET_RATE_PARENT, APBCP_UART2, 0x3, 0x3, 0x0, 0, &uart2_lock},
+};
+
+static void pxa910_apb_periph_clk_init(struct pxa910_clk_unit *pxa_unit)
+{
+       struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+       mmp_register_mux_clks(unit, apbc_mux_clks, pxa_unit->apbc_base,
+                               ARRAY_SIZE(apbc_mux_clks));     /* muxes first: gates below name them as parents */
+
+       mmp_register_mux_clks(unit, apbcp_mux_clks, pxa_unit->apbcp_base,
+                               ARRAY_SIZE(apbcp_mux_clks));
+
+       mmp_register_gate_clks(unit, apbc_gate_clks, pxa_unit->apbc_base,
+                               ARRAY_SIZE(apbc_gate_clks));
+
+       mmp_register_gate_clks(unit, apbcp_gate_clks, pxa_unit->apbcp_base,
+                               ARRAY_SIZE(apbcp_gate_clks));
+}
+
+static DEFINE_SPINLOCK(sdh0_lock);
+static DEFINE_SPINLOCK(sdh1_lock);
+static const char *sdh_parent_names[] = {"pll1_12", "pll1_13"};
+
+static DEFINE_SPINLOCK(usb_lock);
+
+static DEFINE_SPINLOCK(disp0_lock);
+static const char *disp_parent_names[] = {"pll1_2", "pll1_12"};
+
+static DEFINE_SPINLOCK(ccic0_lock);
+static const char *ccic_parent_names[] = {"pll1_2", "pll1_12"};
+static const char *ccic_phy_parent_names[] = {"pll1_6", "pll1_12"};
+
+static struct mmp_param_mux_clk apmu_mux_clks[] = {
+       {0, "sdh0_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH0, 6, 1, 0, &sdh0_lock},
+       {0, "sdh1_mux", sdh_parent_names, ARRAY_SIZE(sdh_parent_names), CLK_SET_RATE_PARENT, APMU_SDH1, 6, 1, 0, &sdh1_lock},
+       {0, "disp0_mux", disp_parent_names, ARRAY_SIZE(disp_parent_names), CLK_SET_RATE_PARENT, APMU_DISP0, 6, 1, 0, &disp0_lock},
+       {0, "ccic0_mux", ccic_parent_names, ARRAY_SIZE(ccic_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 6, 1, 0, &ccic0_lock},
+       {0, "ccic0_phy_mux", ccic_phy_parent_names, ARRAY_SIZE(ccic_phy_parent_names), CLK_SET_RATE_PARENT, APMU_CCIC0, 7, 1, 0, &ccic0_lock},
+};
+
+static struct mmp_param_div_clk apmu_div_clks[] = {
+       {0, "ccic0_sphy_div", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 10, 5, 0, &ccic0_lock},
+};
+
+static struct mmp_param_gate_clk apmu_gate_clks[] = {
+       {PXA910_CLK_DFC, "dfc_clk", "pll1_4", CLK_SET_RATE_PARENT, APMU_DFC, 0x19b, 0x19b, 0x0, 0, NULL},
+       {PXA910_CLK_USB, "usb_clk", "usb_pll", 0, APMU_USB, 0x9, 0x9, 0x0, 0, &usb_lock},
+       {PXA910_CLK_SPH, "sph_clk", "usb_pll", 0, APMU_USB, 0x12, 0x12, 0x0, 0, &usb_lock},
+       /* The gate clocks has mux parent. */
+       {PXA910_CLK_SDH0, "sdh0_clk", "sdh0_mux", CLK_SET_RATE_PARENT, APMU_SDH0, 0x1b, 0x1b, 0x0, 0, &sdh0_lock},
+       {PXA910_CLK_SDH1, "sdh1_clk", "sdh1_mux", CLK_SET_RATE_PARENT, APMU_SDH1, 0x1b, 0x1b, 0x0, 0, &sdh1_lock},
+       {PXA910_CLK_DISP0, "disp0_clk", "disp0_mux", CLK_SET_RATE_PARENT, APMU_DISP0, 0x1b, 0x1b, 0x0, 0, &disp0_lock},
+       {PXA910_CLK_CCIC0, "ccic0_clk", "ccic0_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x1b, 0x1b, 0x0, 0, &ccic0_lock},
+       {PXA910_CLK_CCIC0_PHY, "ccic0_phy_clk", "ccic0_phy_mux", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x24, 0x24, 0x0, 0, &ccic0_lock},
+       {PXA910_CLK_CCIC0_SPHY, "ccic0_sphy_clk", "ccic0_sphy_div", CLK_SET_RATE_PARENT, APMU_CCIC0, 0x300, 0x300, 0x0, 0, &ccic0_lock},
+};
+
+static void pxa910_axi_periph_clk_init(struct pxa910_clk_unit *pxa_unit)
+{
+       struct mmp_clk_unit *unit = &pxa_unit->unit;
+
+       mmp_register_mux_clks(unit, apmu_mux_clks, pxa_unit->apmu_base,
+                               ARRAY_SIZE(apmu_mux_clks));     /* muxes, then divs, then gates: parent order */
+
+       mmp_register_div_clks(unit, apmu_div_clks, pxa_unit->apmu_base,
+                               ARRAY_SIZE(apmu_div_clks));
+
+       mmp_register_gate_clks(unit, apmu_gate_clks, pxa_unit->apmu_base,
+                               ARRAY_SIZE(apmu_gate_clks));
+}
+
+static void pxa910_clk_reset_init(struct device_node *np,
+                               struct pxa910_clk_unit *pxa_unit)
+{
+       struct mmp_clk_reset_cell *cells;       /* APBC cells first, then APBCP cells */
+       int i, base, nr_resets_apbc, nr_resets_apbcp, nr_resets;
+
+       nr_resets_apbc = ARRAY_SIZE(apbc_gate_clks);
+       nr_resets_apbcp = ARRAY_SIZE(apbcp_gate_clks);
+       nr_resets = nr_resets_apbc + nr_resets_apbcp;
+       cells = kcalloc(nr_resets, sizeof(*cells), GFP_KERNEL);
+       if (!cells)
+               return;
+
+       base = 0;
+       for (i = 0; i < nr_resets_apbc; i++) {
+               cells[base + i].clk_id = apbc_gate_clks[i].id;
+               cells[base + i].reg =
+                       pxa_unit->apbc_base + apbc_gate_clks[i].offset;
+               cells[base + i].flags = 0;
+               cells[base + i].lock = apbc_gate_clks[i].lock;
+               cells[base + i].bits = 0x4;
+       }
+
+       base = nr_resets_apbc;
+       for (i = 0; i < nr_resets_apbcp; i++) {
+               cells[base + i].clk_id = apbcp_gate_clks[i].id;
+               cells[base + i].reg =
+                       pxa_unit->apbcp_base + apbcp_gate_clks[i].offset; /* fix: used apbc_* */
+               cells[base + i].flags = 0;
+               cells[base + i].lock = apbcp_gate_clks[i].lock; /* fix: used apbc_gate_clks */
+               cells[base + i].bits = 0x4;
+       }
+
+       mmp_clk_reset_register(np, cells, nr_resets);
+}
+
+static void __init pxa910_clk_init(struct device_node *np)
+{
+       struct pxa910_clk_unit *pxa_unit;
+
+       pxa_unit = kzalloc(sizeof(*pxa_unit), GFP_KERNEL);
+       if (!pxa_unit)
+               return;
+
+       pxa_unit->mpmu_base = of_iomap(np, 0);
+       if (!pxa_unit->mpmu_base) {
+               pr_err("failed to map mpmu registers\n");
+               return;         /* boot-time init: leak of pxa_unit on failure is accepted */
+       }
+
+       pxa_unit->apmu_base = of_iomap(np, 1);
+       if (!pxa_unit->apmu_base) {     /* fix: was re-testing mpmu_base (copy-paste) */
+               pr_err("failed to map apmu registers\n");
+               return;
+       }
+
+       pxa_unit->apbc_base = of_iomap(np, 2);
+       if (!pxa_unit->apbc_base) {
+               pr_err("failed to map apbc registers\n");
+               return;
+       }
+
+       pxa_unit->apbcp_base = of_iomap(np, 3);
+       if (!pxa_unit->apbcp_base) {    /* fix: was re-testing mpmu_base (copy-paste) */
+               pr_err("failed to map apbcp registers\n");
+               return;
+       }
+
+       mmp_clk_init(np, &pxa_unit->unit, PXA910_NR_CLKS);
+
+       pxa910_pll_init(pxa_unit);
+
+       pxa910_apb_periph_clk_init(pxa_unit);
+
+       pxa910_axi_periph_clk_init(pxa_unit);
+
+       pxa910_clk_reset_init(np, pxa_unit);
+}
+
+CLK_OF_DECLARE(pxa910_clk, "marvell,pxa910-clock", pxa910_clk_init);
index 014396b028a2c8c137723ccf2063f2dba6404f02..93e967c0f972f954ef1f7af9bb2f90e0c3241cae 100644 (file)
@@ -47,7 +47,7 @@
 
 static DEFINE_SPINLOCK(clk_lock);
 
-static struct clk_factor_masks uart_factor_masks = {
+static struct mmp_clk_factor_masks uart_factor_masks = {
        .factor = 2,
        .num_mask = 0x1fff,
        .den_mask = 0x1fff,
@@ -55,7 +55,7 @@ static struct clk_factor_masks uart_factor_masks = {
        .den_shift = 0,
 };
 
-static struct clk_factor_tbl uart_factor_tbl[] = {
+static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
        {.num = 8125, .den = 1536},     /*14.745MHZ */
 };
 
@@ -158,7 +158,7 @@ void __init pxa168_clk_init(void)
        uart_pll = mmp_clk_register_factor("uart_pll", "pll1_4", 0,
                                mpmu_base + MPMU_UART_PLL,
                                &uart_factor_masks, uart_factor_tbl,
-                               ARRAY_SIZE(uart_factor_tbl));
+                               ARRAY_SIZE(uart_factor_tbl), &clk_lock);
        clk_set_rate(uart_pll, 14745600);
        clk_register_clkdev(uart_pll, "uart_pll", NULL);
 
index 9efc6a47535d3bf07747e3373078daef8162794a..993abcdb32cce825fdb1d2ea835cb69f3ecf94fc 100644 (file)
@@ -45,7 +45,7 @@
 
 static DEFINE_SPINLOCK(clk_lock);
 
-static struct clk_factor_masks uart_factor_masks = {
+static struct mmp_clk_factor_masks uart_factor_masks = {
        .factor = 2,
        .num_mask = 0x1fff,
        .den_mask = 0x1fff,
@@ -53,7 +53,7 @@ static struct clk_factor_masks uart_factor_masks = {
        .den_shift = 0,
 };
 
-static struct clk_factor_tbl uart_factor_tbl[] = {
+static struct mmp_clk_factor_tbl uart_factor_tbl[] = {
        {.num = 8125, .den = 1536},     /*14.745MHZ */
 };
 
@@ -163,7 +163,7 @@ void __init pxa910_clk_init(void)
        uart_pll =  mmp_clk_register_factor("uart_pll", "pll1_4", 0,
                                mpmu_base + MPMU_UART_PLL,
                                &uart_factor_masks, uart_factor_tbl,
-                               ARRAY_SIZE(uart_factor_tbl));
+                               ARRAY_SIZE(uart_factor_tbl), &clk_lock);
        clk_set_rate(uart_pll, 14745600);
        clk_register_clkdev(uart_pll, "uart_pll", NULL);
 
diff --git a/drivers/clk/mmp/clk.c b/drivers/clk/mmp/clk.c
new file mode 100644 (file)
index 0000000..cf038ef
--- /dev/null
@@ -0,0 +1,192 @@
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#include "clk.h"
+
+void mmp_clk_init(struct device_node *np, struct mmp_clk_unit *unit,
+               int nr_clks)
+{
+       static struct clk **clk_table;
+
+       clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
+       if (!clk_table)
+               return;
+
+       unit->clk_table = clk_table;
+       unit->nr_clks = nr_clks;
+       unit->clk_data.clks = clk_table;
+       unit->clk_data.clk_num = nr_clks;
+       of_clk_add_provider(np, of_clk_src_onecell_get, &unit->clk_data);
+}
+
+void mmp_register_fixed_rate_clks(struct mmp_clk_unit *unit,
+                               struct mmp_param_fixed_rate_clk *clks,
+                               int size)
+{
+       int i;
+       struct clk *clk;
+
+       for (i = 0; i < size; i++) {
+               clk = clk_register_fixed_rate(NULL, clks[i].name,
+                                       clks[i].parent_name,
+                                       clks[i].flags,
+                                       clks[i].fixed_rate);
+               if (IS_ERR(clk)) {
+                       pr_err("%s: failed to register clock %s\n",
+                              __func__, clks[i].name);
+                       continue;
+               }
+               if (clks[i].id)
+                       unit->clk_table[clks[i].id] = clk;
+       }
+}
+
+void mmp_register_fixed_factor_clks(struct mmp_clk_unit *unit,
+                               struct mmp_param_fixed_factor_clk *clks,
+                               int size)
+{
+       struct clk *clk;
+       int i;
+
+       for (i = 0; i < size; i++) {
+               clk = clk_register_fixed_factor(NULL, clks[i].name,
+                                               clks[i].parent_name,
+                                               clks[i].flags, clks[i].mult,
+                                               clks[i].div);
+               if (IS_ERR(clk)) {
+                       pr_err("%s: failed to register clock %s\n",
+                              __func__, clks[i].name);
+                       continue;
+               }
+               if (clks[i].id)
+                       unit->clk_table[clks[i].id] = clk;
+       }
+}
+
+void mmp_register_general_gate_clks(struct mmp_clk_unit *unit,
+                               struct mmp_param_general_gate_clk *clks,
+                               void __iomem *base, int size)
+{
+       struct clk *clk;
+       int i;
+
+       for (i = 0; i < size; i++) {
+               clk = clk_register_gate(NULL, clks[i].name,
+                                       clks[i].parent_name,
+                                       clks[i].flags,
+                                       base + clks[i].offset,
+                                       clks[i].bit_idx,
+                                       clks[i].gate_flags,
+                                       clks[i].lock);
+
+               if (IS_ERR(clk)) {
+                       pr_err("%s: failed to register clock %s\n",
+                              __func__, clks[i].name);
+                       continue;
+               }
+               if (clks[i].id)
+                       unit->clk_table[clks[i].id] = clk;
+       }
+}
+
+void mmp_register_gate_clks(struct mmp_clk_unit *unit,
+                       struct mmp_param_gate_clk *clks,
+                       void __iomem *base, int size)
+{
+       struct clk *clk;
+       int i;
+
+       for (i = 0; i < size; i++) {
+               clk = mmp_clk_register_gate(NULL, clks[i].name,
+                                       clks[i].parent_name,
+                                       clks[i].flags,
+                                       base + clks[i].offset,
+                                       clks[i].mask,
+                                       clks[i].val_enable,
+                                       clks[i].val_disable,
+                                       clks[i].gate_flags,
+                                       clks[i].lock);
+
+               if (IS_ERR(clk)) {
+                       pr_err("%s: failed to register clock %s\n",
+                              __func__, clks[i].name);
+                       continue;
+               }
+               if (clks[i].id)
+                       unit->clk_table[clks[i].id] = clk;
+       }
+}
+
+void mmp_register_mux_clks(struct mmp_clk_unit *unit,
+                       struct mmp_param_mux_clk *clks,
+                       void __iomem *base, int size)
+{
+       struct clk *clk;
+       int i;
+
+       for (i = 0; i < size; i++) {
+               clk = clk_register_mux(NULL, clks[i].name,
+                                       clks[i].parent_name,
+                                       clks[i].num_parents,
+                                       clks[i].flags,
+                                       base + clks[i].offset,
+                                       clks[i].shift,
+                                       clks[i].width,
+                                       clks[i].mux_flags,
+                                       clks[i].lock);
+
+               if (IS_ERR(clk)) {
+                       pr_err("%s: failed to register clock %s\n",
+                              __func__, clks[i].name);
+                       continue;
+               }
+               if (clks[i].id)
+                       unit->clk_table[clks[i].id] = clk;
+       }
+}
+
+void mmp_register_div_clks(struct mmp_clk_unit *unit,
+                       struct mmp_param_div_clk *clks,
+                       void __iomem *base, int size)
+{
+       struct clk *clk;
+       int i;
+
+       for (i = 0; i < size; i++) {
+               clk = clk_register_divider(NULL, clks[i].name,
+                                       clks[i].parent_name,
+                                       clks[i].flags,
+                                       base + clks[i].offset,
+                                       clks[i].shift,
+                                       clks[i].width,
+                                       clks[i].div_flags,
+                                       clks[i].lock);
+
+               if (IS_ERR(clk)) {
+                       pr_err("%s: failed to register clock %s\n",
+                              __func__, clks[i].name);
+                       continue;
+               }
+               if (clks[i].id)
+                       unit->clk_table[clks[i].id] = clk;
+       }
+}
+
+void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
+                       struct clk *clk)
+{
+       if (IS_ERR_OR_NULL(clk)) {
+               pr_err("CLK %d has invalid pointer %p\n", id, clk);
+               return;
+       }
+       if (id > unit->nr_clks) {
+               pr_err("CLK %d is invalid\n", id);
+               return;
+       }
+
+       unit->clk_table[id] = clk;
+}
index ab86dd4a416a5fa6b75aa74d7588894e7b8c4c2e..adf9b711b03702e61c989b315124b66aafa6c07a 100644 (file)
 #define APBC_NO_BUS_CTRL       BIT(0)
 #define APBC_POWER_CTRL                BIT(1)
 
-struct clk_factor_masks {
-       unsigned int    factor;
-       unsigned int    num_mask;
-       unsigned int    den_mask;
-       unsigned int    num_shift;
-       unsigned int    den_shift;
+
+/* Clock type "factor" */
+struct mmp_clk_factor_masks {
+       unsigned int factor;
+       unsigned int num_mask;
+       unsigned int den_mask;
+       unsigned int num_shift;
+       unsigned int den_shift;
 };
 
-struct clk_factor_tbl {
+struct mmp_clk_factor_tbl {
        unsigned int num;
        unsigned int den;
 };
 
+struct mmp_clk_factor {
+       struct clk_hw hw;
+       void __iomem *base;
+       struct mmp_clk_factor_masks *masks;
+       struct mmp_clk_factor_tbl *ftbl;
+       unsigned int ftbl_cnt;
+       spinlock_t *lock;
+};
+
+extern struct clk *mmp_clk_register_factor(const char *name,
+               const char *parent_name, unsigned long flags,
+               void __iomem *base, struct mmp_clk_factor_masks *masks,
+               struct mmp_clk_factor_tbl *ftbl, unsigned int ftbl_cnt,
+               spinlock_t *lock);
+
+/* Clock type "mix" */
+#define MMP_CLK_BITS_MASK(width, shift)                        \
+               (((1 << (width)) - 1) << (shift))
+#define MMP_CLK_BITS_GET_VAL(data, width, shift)       \
+               ((data & MMP_CLK_BITS_MASK(width, shift)) >> (shift))
+#define MMP_CLK_BITS_SET_VAL(val, width, shift)                \
+               (((val) << (shift)) & MMP_CLK_BITS_MASK(width, shift))
+
+enum {
+       MMP_CLK_MIX_TYPE_V1,
+       MMP_CLK_MIX_TYPE_V2,
+       MMP_CLK_MIX_TYPE_V3,
+};
+
+/* The register layout */
+struct mmp_clk_mix_reg_info {
+       void __iomem *reg_clk_ctrl;
+       void __iomem *reg_clk_sel;
+       u8 width_div;
+       u8 shift_div;
+       u8 width_mux;
+       u8 shift_mux;
+       u8 bit_fc;
+};
+
+/* The suggested clock table from user. */
+struct mmp_clk_mix_clk_table {
+       unsigned long rate;
+       u8 parent_index;
+       unsigned int divisor;
+       unsigned int valid;
+};
+
+struct mmp_clk_mix_config {
+       struct mmp_clk_mix_reg_info reg_info;
+       struct mmp_clk_mix_clk_table *table;
+       unsigned int table_size;
+       u32 *mux_table;
+       struct clk_div_table *div_table;
+       u8 div_flags;
+       u8 mux_flags;
+};
+
+struct mmp_clk_mix {
+       struct clk_hw hw;
+       struct mmp_clk_mix_reg_info reg_info;
+       struct mmp_clk_mix_clk_table *table;
+       u32 *mux_table;
+       struct clk_div_table *div_table;
+       unsigned int table_size;
+       u8 div_flags;
+       u8 mux_flags;
+       unsigned int type;
+       spinlock_t *lock;
+};
+
+extern const struct clk_ops mmp_clk_mix_ops;
+extern struct clk *mmp_clk_register_mix(struct device *dev,
+                                       const char *name,
+                                       const char **parent_names,
+                                       u8 num_parents,
+                                       unsigned long flags,
+                                       struct mmp_clk_mix_config *config,
+                                       spinlock_t *lock);
+
+
+/* Clock type "gate". MMP private gate */
+#define MMP_CLK_GATE_NEED_DELAY                BIT(0)
+
+struct mmp_clk_gate {
+       struct clk_hw hw;
+       void __iomem *reg;
+       u32 mask;
+       u32 val_enable;
+       u32 val_disable;
+       unsigned int flags;
+       spinlock_t *lock;
+};
+
+extern const struct clk_ops mmp_clk_gate_ops;
+extern struct clk *mmp_clk_register_gate(struct device *dev, const char *name,
+                       const char *parent_name, unsigned long flags,
+                       void __iomem *reg, u32 mask, u32 val_enable,
+                       u32 val_disable, unsigned int gate_flags,
+                       spinlock_t *lock);
+
+
 extern struct clk *mmp_clk_register_pll2(const char *name,
                const char *parent_name, unsigned long flags);
 extern struct clk *mmp_clk_register_apbc(const char *name,
@@ -28,8 +132,108 @@ extern struct clk *mmp_clk_register_apbc(const char *name,
 extern struct clk *mmp_clk_register_apmu(const char *name,
                const char *parent_name, void __iomem *base, u32 enable_mask,
                spinlock_t *lock);
-extern struct clk *mmp_clk_register_factor(const char *name,
-               const char *parent_name, unsigned long flags,
-               void __iomem *base, struct clk_factor_masks *masks,
-               struct clk_factor_tbl *ftbl, unsigned int ftbl_cnt);
+
+struct mmp_clk_unit {
+       unsigned int nr_clks;
+       struct clk **clk_table;
+       struct clk_onecell_data clk_data;
+};
+
+struct mmp_param_fixed_rate_clk {
+       unsigned int id;
+       char *name;
+       const char *parent_name;
+       unsigned long flags;
+       unsigned long fixed_rate;
+};
+void mmp_register_fixed_rate_clks(struct mmp_clk_unit *unit,
+                               struct mmp_param_fixed_rate_clk *clks,
+                               int size);
+
+struct mmp_param_fixed_factor_clk {
+       unsigned int id;
+       char *name;
+       const char *parent_name;
+       unsigned long mult;
+       unsigned long div;
+       unsigned long flags;
+};
+void mmp_register_fixed_factor_clks(struct mmp_clk_unit *unit,
+                               struct mmp_param_fixed_factor_clk *clks,
+                               int size);
+
+struct mmp_param_general_gate_clk {
+       unsigned int id;
+       const char *name;
+       const char *parent_name;
+       unsigned long flags;
+       unsigned long offset;
+       u8 bit_idx;
+       u8 gate_flags;
+       spinlock_t *lock;
+};
+void mmp_register_general_gate_clks(struct mmp_clk_unit *unit,
+                               struct mmp_param_general_gate_clk *clks,
+                               void __iomem *base, int size);
+
+struct mmp_param_gate_clk {
+       unsigned int id;
+       char *name;
+       const char *parent_name;
+       unsigned long flags;
+       unsigned long offset;
+       u32 mask;
+       u32 val_enable;
+       u32 val_disable;
+       unsigned int gate_flags;
+       spinlock_t *lock;
+};
+void mmp_register_gate_clks(struct mmp_clk_unit *unit,
+                       struct mmp_param_gate_clk *clks,
+                       void __iomem *base, int size);
+
+struct mmp_param_mux_clk {
+       unsigned int id;
+       char *name;
+       const char **parent_name;
+       u8 num_parents;
+       unsigned long flags;
+       unsigned long offset;
+       u8 shift;
+       u8 width;
+       u8 mux_flags;
+       spinlock_t *lock;
+};
+void mmp_register_mux_clks(struct mmp_clk_unit *unit,
+                       struct mmp_param_mux_clk *clks,
+                       void __iomem *base, int size);
+
+struct mmp_param_div_clk {
+       unsigned int id;
+       char *name;
+       const char *parent_name;
+       unsigned long flags;
+       unsigned long offset;
+       u8 shift;
+       u8 width;
+       u8 div_flags;
+       spinlock_t *lock;
+};
+void mmp_register_div_clks(struct mmp_clk_unit *unit,
+                       struct mmp_param_div_clk *clks,
+                       void __iomem *base, int size);
+
+#define DEFINE_MIX_REG_INFO(w_d, s_d, w_m, s_m, fc)    \
+{                                                      \
+       .width_div = (w_d),                             \
+       .shift_div = (s_d),                             \
+       .width_mux = (w_m),                             \
+       .shift_mux = (s_m),                             \
+       .bit_fc = (fc),                                 \
+}
+
+void mmp_clk_init(struct device_node *np, struct mmp_clk_unit *unit,
+               int nr_clks);
+void mmp_clk_add(struct mmp_clk_unit *unit, unsigned int id,
+               struct clk *clk);
 #endif
diff --git a/drivers/clk/mmp/reset.c b/drivers/clk/mmp/reset.c
new file mode 100644 (file)
index 0000000..b54da1f
--- /dev/null
@@ -0,0 +1,99 @@
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/reset-controller.h>
+
+#include "reset.h"
+
+#define rcdev_to_unit(rcdev) container_of(rcdev, struct mmp_clk_reset_unit, rcdev)
+
+static int mmp_of_reset_xlate(struct reset_controller_dev *rcdev,
+                         const struct of_phandle_args *reset_spec)
+{
+       struct mmp_clk_reset_unit *unit = rcdev_to_unit(rcdev);
+       struct mmp_clk_reset_cell *cell;
+       int i;
+
+       if (WARN_ON(reset_spec->args_count != rcdev->of_reset_n_cells))
+               return -EINVAL;
+
+       for (i = 0; i < rcdev->nr_resets; i++) {
+               cell = &unit->cells[i];
+               if (cell->clk_id == reset_spec->args[0])
+                       break;
+       }
+
+       if (i == rcdev->nr_resets)
+               return -EINVAL;
+
+       return i;
+}
+
+static int mmp_clk_reset_assert(struct reset_controller_dev *rcdev,
+                               unsigned long id)
+{
+       struct mmp_clk_reset_unit *unit = rcdev_to_unit(rcdev);
+       struct mmp_clk_reset_cell *cell;
+       unsigned long flags = 0;
+       u32 val;
+
+       cell = &unit->cells[id];
+       if (cell->lock)
+               spin_lock_irqsave(cell->lock, flags);
+
+       val = readl(cell->reg);
+       val |= cell->bits;
+       writel(val, cell->reg);
+
+       if (cell->lock)
+               spin_unlock_irqrestore(cell->lock, flags);
+
+       return 0;
+}
+
+static int mmp_clk_reset_deassert(struct reset_controller_dev *rcdev,
+                               unsigned long id)
+{
+       struct mmp_clk_reset_unit *unit = rcdev_to_unit(rcdev);
+       struct mmp_clk_reset_cell *cell;
+       unsigned long flags = 0;
+       u32 val;
+
+       cell = &unit->cells[id];
+       if (cell->lock)
+               spin_lock_irqsave(cell->lock, flags);
+
+       val = readl(cell->reg);
+       val &= ~cell->bits;
+       writel(val, cell->reg);
+
+       if (cell->lock)
+               spin_unlock_irqrestore(cell->lock, flags);
+
+       return 0;
+}
+
+static struct reset_control_ops mmp_clk_reset_ops = {
+       .assert         = mmp_clk_reset_assert,
+       .deassert       = mmp_clk_reset_deassert,
+};
+
+void mmp_clk_reset_register(struct device_node *np,
+                       struct mmp_clk_reset_cell *cells, int nr_resets)
+{
+       struct mmp_clk_reset_unit *unit;
+
+       unit = kzalloc(sizeof(*unit), GFP_KERNEL);
+       if (!unit)
+               return;
+
+       unit->cells = cells;
+       unit->rcdev.of_reset_n_cells = 1;
+       unit->rcdev.nr_resets = nr_resets;
+       unit->rcdev.ops = &mmp_clk_reset_ops;
+       unit->rcdev.of_node = np;
+       unit->rcdev.of_xlate = mmp_of_reset_xlate;
+
+       reset_controller_register(&unit->rcdev);
+}
diff --git a/drivers/clk/mmp/reset.h b/drivers/clk/mmp/reset.h
new file mode 100644 (file)
index 0000000..be8b1a7
--- /dev/null
@@ -0,0 +1,31 @@
+#ifndef __MACH_MMP_CLK_RESET_H
+#define __MACH_MMP_CLK_RESET_H
+
+#include <linux/reset-controller.h>
+
+#define MMP_RESET_INVERT       1
+
+struct mmp_clk_reset_cell {
+       unsigned int clk_id;
+       void __iomem *reg;
+       u32 bits;
+       unsigned int flags;
+       spinlock_t *lock;
+};
+
+struct mmp_clk_reset_unit {
+       struct reset_controller_dev rcdev;
+       struct mmp_clk_reset_cell *cells;
+};
+
+#ifdef CONFIG_RESET_CONTROLLER
+void mmp_clk_reset_register(struct device_node *np,
+                       struct mmp_clk_reset_cell *cells, int nr_resets);
+#else
+static inline void mmp_clk_reset_register(struct device_node *np,
+                       struct mmp_clk_reset_cell *cells, int nr_resets)
+{
+}
+#endif
+
+#endif
index 4ff2abcd500b8e2109241b56152eeef6dd46f0e0..38e9153446059a1fb0c0728c841517cf99747f40 100644 (file)
@@ -1,2 +1,3 @@
 obj-y                          += clk-pxa.o
+obj-$(CONFIG_PXA25x)           += clk-pxa25x.o
 obj-$(CONFIG_PXA27x)           += clk-pxa27x.o
index ef3c05389c0aeb4704a6f2c2e23720eda6311a83..4e834753ab094500677811702c37ad005503929b 100644 (file)
@@ -26,12 +26,20 @@ static struct clk_onecell_data onecell_data = {
        .clk_num = CLK_MAX,
 };
 
-#define to_pxa_clk(_hw) container_of(_hw, struct pxa_clk_cken, hw)
+struct pxa_clk {
+       struct clk_hw hw;
+       struct clk_fixed_factor lp;
+       struct clk_fixed_factor hp;
+       struct clk_gate gate;
+       bool (*is_in_low_power)(void);
+};
+
+#define to_pxa_clk(_hw) container_of(_hw, struct pxa_clk, hw)
 
 static unsigned long cken_recalc_rate(struct clk_hw *hw,
                                      unsigned long parent_rate)
 {
-       struct pxa_clk_cken *pclk = to_pxa_clk(hw);
+       struct pxa_clk *pclk = to_pxa_clk(hw);
        struct clk_fixed_factor *fix;
 
        if (!pclk->is_in_low_power || pclk->is_in_low_power())
@@ -48,7 +56,7 @@ static struct clk_ops cken_rate_ops = {
 
 static u8 cken_get_parent(struct clk_hw *hw)
 {
-       struct pxa_clk_cken *pclk = to_pxa_clk(hw);
+       struct pxa_clk *pclk = to_pxa_clk(hw);
 
        if (!pclk->is_in_low_power)
                return 0;
@@ -69,29 +77,32 @@ void __init clkdev_pxa_register(int ckid, const char *con_id,
                clk_register_clkdev(clk, con_id, dev_id);
 }
 
-int __init clk_pxa_cken_init(struct pxa_clk_cken *clks, int nb_clks)
+int __init clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks)
 {
        int i;
-       struct pxa_clk_cken *pclk;
+       struct pxa_clk *pxa_clk;
        struct clk *clk;
 
        for (i = 0; i < nb_clks; i++) {
-               pclk = clks + i;
-               pclk->gate.lock = &lock;
-               clk = clk_register_composite(NULL, pclk->name,
-                                            pclk->parent_names, 2,
-                                            &pclk->hw, &cken_mux_ops,
-                                            &pclk->hw, &cken_rate_ops,
-                                            &pclk->gate.hw, &clk_gate_ops,
-                                            pclk->flags);
-               clkdev_pxa_register(pclk->ckid, pclk->con_id, pclk->dev_id,
-                                   clk);
+               pxa_clk = kzalloc(sizeof(*pxa_clk), GFP_KERNEL);
+               pxa_clk->is_in_low_power = clks[i].is_in_low_power;
+               pxa_clk->lp = clks[i].lp;
+               pxa_clk->hp = clks[i].hp;
+               pxa_clk->gate = clks[i].gate;
+               pxa_clk->gate.lock = &lock;
+               clk = clk_register_composite(NULL, clks[i].name,
+                                            clks[i].parent_names, 2,
+                                            &pxa_clk->hw, &cken_mux_ops,
+                                            &pxa_clk->hw, &cken_rate_ops,
+                                            &pxa_clk->gate.hw, &clk_gate_ops,
+                                            clks[i].flags);
+               clkdev_pxa_register(clks[i].ckid, clks[i].con_id,
+                                   clks[i].dev_id, clk);
        }
        return 0;
 }
 
-static void __init pxa_dt_clocks_init(struct device_node *np)
+void __init clk_pxa_dt_common_init(struct device_node *np)
 {
        of_clk_add_provider(np, of_clk_src_onecell_get, &onecell_data);
 }
-CLK_OF_DECLARE(pxa_clks, "marvell,pxa-clocks", pxa_dt_clocks_init);
index 5fe219d06b490c49a7203dd547fe65b537089c73..323965430111943b33dad32b3e8d5a889c7dead2 100644 (file)
@@ -25,7 +25,7 @@
        static struct clk_ops name ## _rate_ops = {             \
                .recalc_rate = name ## _get_rate,               \
        };                                                      \
-       static struct clk *clk_register_ ## name(void)          \
+       static struct clk * __init clk_register_ ## name(void)  \
        {                                                       \
                return clk_register_composite(NULL, clk_name,   \
                        name ## _parents,                       \
@@ -40,7 +40,7 @@
        static struct clk_ops name ## _rate_ops = {             \
                .recalc_rate = name ## _get_rate,               \
        };                                                      \
-       static struct clk *clk_register_ ## name(void)          \
+       static struct clk * __init clk_register_ ## name(void)  \
        {                                                       \
                return clk_register_composite(NULL, clk_name,   \
                        name ## _parents,                       \
@@ -66,7 +66,7 @@
  *  |    Clock   | --- | / div_hp  |
  *  +------------+     +-----------+
  */
-struct pxa_clk_cken {
+struct desc_clk_cken {
        struct clk_hw hw;
        int ckid;
        const char *name;
@@ -102,6 +102,7 @@ static int dummy_clk_set_parent(struct clk_hw *hw, u8 index)
 
 extern void clkdev_pxa_register(int ckid, const char *con_id,
                                const char *dev_id, struct clk *clk);
-extern int clk_pxa_cken_init(struct pxa_clk_cken *clks, int nb_clks);
+extern int clk_pxa_cken_init(const struct desc_clk_cken *clks, int nb_clks);
+void clk_pxa_dt_common_init(struct device_node *np);
 
 #endif
diff --git a/drivers/clk/pxa/clk-pxa25x.c b/drivers/clk/pxa/clk-pxa25x.c
new file mode 100644 (file)
index 0000000..6cd88d9
--- /dev/null
@@ -0,0 +1,273 @@
+/*
+ * Marvell PXA25x family clocks
+ *
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * Heavily inspired from former arch/arm/mach-pxa/pxa25x.c.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * For non-devicetree platforms. Once pxa is fully converted to devicetree, this
+ * should go away.
+ */
+#include <linux/clk-provider.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <mach/pxa25x.h>
+#include <mach/pxa2xx-regs.h>
+
+#include <dt-bindings/clock/pxa-clock.h>
+#include "clk-pxa.h"
+
+#define KHz 1000
+#define MHz (1000 * 1000)
+
+enum {
+       PXA_CORE_RUN = 0,
+       PXA_CORE_TURBO,
+};
+
+/*
+ * Various clock factors driven by the CCCR register.
+ */
+
+/* Crystal Frequency to Memory Frequency Multiplier (L) */
+static unsigned char L_clk_mult[32] = { 0, 27, 32, 36, 40, 45, 0, };
+
+/* Memory Frequency to Run Mode Frequency Multiplier (M) */
+static unsigned char M_clk_mult[4] = { 0, 1, 2, 4 };
+
+/* Run Mode Frequency to Turbo Mode Frequency Multiplier (N) */
+/* Note: we store the value N * 2 here. */
+static unsigned char N2_clk_mult[8] = { 0, 0, 2, 3, 4, 0, 6, 0 };
+
+static const char * const get_freq_khz[] = {
+       "core", "run", "cpll", "memory"
+};
+
+/*
+ * Get the clock frequency as reflected by CCCR and the turbo flag.
+ * We assume these values have been applied via a fcs.
+ * If info is not 0 we also display the current settings.
+ */
+unsigned int pxa25x_get_clk_frequency_khz(int info)
+{
+       struct clk *clk;
+       unsigned long clks[5];
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(get_freq_khz); i++) {
+               clk = clk_get(NULL, get_freq_khz[i]);
+               if (IS_ERR(clk)) {
+                       clks[i] = 0;
+               } else {
+                       clks[i] = clk_get_rate(clk);
+                       clk_put(clk);
+               }
+       }
+
+       if (info) {
+               pr_info("Run Mode clock: %ld.%02ldMHz\n",
+                       clks[1] / 1000000, (clks[1] % 1000000) / 10000);
+               pr_info("Turbo Mode clock: %ld.%02ldMHz\n",
+                       clks[2] / 1000000, (clks[2] % 1000000) / 10000);
+               pr_info("Memory clock: %ld.%02ldMHz\n",
+                       clks[3] / 1000000, (clks[3] % 1000000) / 10000);
+       }
+
+       return (unsigned int)clks[0];
+}
+
+static unsigned long clk_pxa25x_memory_get_rate(struct clk_hw *hw,
+                                               unsigned long parent_rate)
+{
+       unsigned long cccr = CCCR;
+       unsigned int m = M_clk_mult[(cccr >> 5) & 0x03];
+
+       return parent_rate / m;
+}
+PARENTS(clk_pxa25x_memory) = { "run" };
+RATE_RO_OPS(clk_pxa25x_memory, "memory");
+
+PARENTS(pxa25x_pbus95) = { "ppll_95_85mhz", "ppll_95_85mhz" };
+PARENTS(pxa25x_pbus147) = { "ppll_147_46mhz", "ppll_147_46mhz" };
+PARENTS(pxa25x_osc3) = { "osc_3_6864mhz", "osc_3_6864mhz" };
+
+#define PXA25X_CKEN(dev_id, con_id, parents, mult, div,                        \
+                   bit, is_lp, flags)                                  \
+       PXA_CKEN(dev_id, con_id, bit, parents, mult, div, mult, div,    \
+                is_lp,  &CKEN, CKEN_ ## bit, flags)
+#define PXA25X_PBUS95_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay)        \
+       PXA25X_CKEN(dev_id, con_id, pxa25x_pbus95_parents, mult_hp,     \
+                   div_hp, bit, NULL, 0)
+#define PXA25X_PBUS147_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay)\
+       PXA25X_CKEN(dev_id, con_id, pxa25x_pbus147_parents, mult_hp,    \
+                   div_hp, bit, NULL, 0)
+#define PXA25X_OSC3_CKEN(dev_id, con_id, bit, mult_hp, div_hp, delay)  \
+       PXA25X_CKEN(dev_id, con_id, pxa25x_osc3_parents, mult_hp,       \
+                   div_hp, bit, NULL, 0)
+
+#define PXA25X_CKEN_1RATE(dev_id, con_id, bit, parents, delay)         \
+       PXA_CKEN_1RATE(dev_id, con_id, bit, parents,                    \
+                      &CKEN, CKEN_ ## bit, 0)
+#define PXA25X_CKEN_1RATE_AO(dev_id, con_id, bit, parents, delay)      \
+       PXA_CKEN_1RATE(dev_id, con_id, bit, parents,                    \
+                      &CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
+
+static struct desc_clk_cken pxa25x_clocks[] __initdata = {
+       PXA25X_PBUS95_CKEN("pxa2xx-mci.0", NULL, MMC, 1, 5, 0),
+       PXA25X_PBUS95_CKEN("pxa2xx-i2c.0", NULL, I2C, 1, 3, 0),
+       PXA25X_PBUS95_CKEN("pxa2xx-ir", "FICPCLK", FICP, 1, 2, 0),
+       PXA25X_PBUS95_CKEN("pxa25x-udc", NULL, USB, 1, 2, 5),
+       PXA25X_PBUS147_CKEN("pxa2xx-uart.0", NULL, FFUART, 1, 10, 1),
+       PXA25X_PBUS147_CKEN("pxa2xx-uart.1", NULL, BTUART, 1, 10, 1),
+       PXA25X_PBUS147_CKEN("pxa2xx-uart.2", NULL, STUART, 1, 10, 1),
+       PXA25X_PBUS147_CKEN("pxa2xx-uart.3", NULL, HWUART, 1, 10, 1),
+       PXA25X_PBUS147_CKEN("pxa2xx-i2s", NULL, I2S, 1, 10, 0),
+       PXA25X_PBUS147_CKEN(NULL, "AC97CLK", AC97, 1, 12, 0),
+       PXA25X_OSC3_CKEN("pxa25x-ssp.0", NULL, SSP, 1, 1, 0),
+       PXA25X_OSC3_CKEN("pxa25x-nssp.1", NULL, NSSP, 1, 1, 0),
+       PXA25X_OSC3_CKEN("pxa25x-nssp.2", NULL, ASSP, 1, 1, 0),
+       PXA25X_OSC3_CKEN("pxa25x-pwm.0", NULL, PWM0, 1, 1, 0),
+       PXA25X_OSC3_CKEN("pxa25x-pwm.1", NULL, PWM1, 1, 1, 0),
+
+       PXA25X_CKEN_1RATE("pxa2xx-fb", NULL, LCD, clk_pxa25x_memory_parents, 0),
+       PXA25X_CKEN_1RATE_AO("pxa2xx-pcmcia", NULL, MEMC,
+                            clk_pxa25x_memory_parents, 0),
+};
+
+static u8 clk_pxa25x_core_get_parent(struct clk_hw *hw)
+{
+       unsigned long clkcfg;
+       unsigned int t;
+
+       asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
+       t  = clkcfg & (1 << 0);
+       if (t)
+               return PXA_CORE_TURBO;
+       return PXA_CORE_RUN;
+}
+
+static unsigned long clk_pxa25x_core_get_rate(struct clk_hw *hw,
+                                             unsigned long parent_rate)
+{
+       return parent_rate;
+}
+PARENTS(clk_pxa25x_core) = { "run", "cpll" };
+MUX_RO_RATE_RO_OPS(clk_pxa25x_core, "core");
+
+static unsigned long clk_pxa25x_run_get_rate(struct clk_hw *hw,
+                                            unsigned long parent_rate)
+{
+       unsigned long cccr = CCCR;
+       unsigned int n2 = N2_clk_mult[(cccr >> 7) & 0x07];
+
+       return (parent_rate / n2) * 2;
+}
+PARENTS(clk_pxa25x_run) = { "cpll" };
+RATE_RO_OPS(clk_pxa25x_run, "run");
+
+static unsigned long clk_pxa25x_cpll_get_rate(struct clk_hw *hw,
+       unsigned long parent_rate)
+{
+       unsigned long clkcfg, cccr = CCCR;
+       unsigned int l, m, n2, t;
+
+       asm("mrc\tp14, 0, %0, c6, c0, 0" : "=r" (clkcfg));
+       t = clkcfg & (1 << 0);
+       l  =  L_clk_mult[(cccr >> 0) & 0x1f];
+       m = M_clk_mult[(cccr >> 5) & 0x03];
+       n2 = N2_clk_mult[(cccr >> 7) & 0x07];
+
+       if (t)
+               return m * l * n2 * parent_rate / 2;
+       return m * l * parent_rate;
+}
+PARENTS(clk_pxa25x_cpll) = { "osc_3_6864mhz" };
+RATE_RO_OPS(clk_pxa25x_cpll, "cpll");
+
+static void __init pxa25x_register_core(void)
+{
+       clk_register_clk_pxa25x_cpll();
+       clk_register_clk_pxa25x_run();
+       clkdev_pxa_register(CLK_CORE, "core", NULL,
+                           clk_register_clk_pxa25x_core());
+}
+
+static void __init pxa25x_register_plls(void)
+{
+       clk_register_fixed_rate(NULL, "osc_3_6864mhz", NULL,
+                               CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+                               3686400);
+       clk_register_fixed_rate(NULL, "osc_32_768khz", NULL,
+                               CLK_GET_RATE_NOCACHE | CLK_IS_ROOT,
+                               32768);
+       clk_register_fixed_rate(NULL, "clk_dummy", NULL, CLK_IS_ROOT, 0);
+       clk_register_fixed_factor(NULL, "ppll_95_85mhz", "osc_3_6864mhz",
+                                 0, 26, 1);
+       clk_register_fixed_factor(NULL, "ppll_147_46mhz", "osc_3_6864mhz",
+                                 0, 40, 1);
+}
+
+static void __init pxa25x_base_clocks_init(void)
+{
+       pxa25x_register_plls();
+       pxa25x_register_core();
+       clk_register_clk_pxa25x_memory();
+}
+
+#define DUMMY_CLK(_con_id, _dev_id, _parent) \
+       { .con_id = _con_id, .dev_id = _dev_id, .parent = _parent }
+struct dummy_clk {
+       const char *con_id;
+       const char *dev_id;
+       const char *parent;
+};
+static struct dummy_clk dummy_clks[] __initdata = {
+       DUMMY_CLK(NULL, "pxa25x-gpio", "osc_32_768khz"),
+       DUMMY_CLK(NULL, "pxa26x-gpio", "osc_32_768khz"),
+       DUMMY_CLK("GPIO11_CLK", NULL, "osc_3_6864mhz"),
+       DUMMY_CLK("GPIO12_CLK", NULL, "osc_32_768khz"),
+       DUMMY_CLK(NULL, "sa1100-rtc", "osc_32_768khz"),
+       DUMMY_CLK("OSTIMER0", NULL, "osc_32_768khz"),
+       DUMMY_CLK("UARTCLK", "pxa2xx-ir", "STUART"),
+};
+
+static void __init pxa25x_dummy_clocks_init(void)
+{
+       struct clk *clk;
+       struct dummy_clk *d;
+       const char *name;
+       int i;
+
+       /*
+        * All pinctrl logic has been wiped out of the clock driver, especially
+        * for gpio11 and gpio12 outputs. Machine code should ensure proper pin
+        * control (ie. pxa2xx_mfp_config() invocation).
+        */
+       for (i = 0; i < ARRAY_SIZE(dummy_clks); i++) {
+               d = &dummy_clks[i];
+               name = d->dev_id ? d->dev_id : d->con_id;
+               clk = clk_register_fixed_factor(NULL, name, d->parent, 0, 1, 1);
+               clk_register_clkdev(clk, d->con_id, d->dev_id);
+       }
+}
+
+int __init pxa25x_clocks_init(void)
+{
+       pxa25x_base_clocks_init();
+       pxa25x_dummy_clocks_init();
+       return clk_pxa_cken_init(pxa25x_clocks, ARRAY_SIZE(pxa25x_clocks));
+}
+
+static void __init pxa25x_dt_clocks_init(struct device_node *np)
+{
+       pxa25x_clocks_init();
+       clk_pxa_dt_common_init(np);
+}
+CLK_OF_DECLARE(pxa25x_clks, "marvell,pxa250-core-clocks",
+              pxa25x_dt_clocks_init);
index 88b9fe13fa444b2a81a3bd8a2588b035357d0048..5f9b54b024b9e1607b724c8ceb5dcfba7b58c992 100644 (file)
@@ -111,7 +111,7 @@ PARENTS(pxa27x_membus) = { "lcd_base", "lcd_base" };
        PXA_CKEN_1RATE(dev_id, con_id, bit, parents,                    \
                       &CKEN, CKEN_ ## bit, CLK_IGNORE_UNUSED)
 
-static struct pxa_clk_cken pxa27x_clocks[] = {
+static struct desc_clk_cken pxa27x_clocks[] __initdata = {
        PXA27X_PBUS_CKEN("pxa2xx-uart.0", NULL, FFUART, 2, 42, 1),
        PXA27X_PBUS_CKEN("pxa2xx-uart.1", NULL, BTUART, 2, 42, 1),
        PXA27X_PBUS_CKEN("pxa2xx-uart.2", NULL, STUART, 2, 42, 1),
@@ -368,3 +368,10 @@ static int __init pxa27x_clocks_init(void)
        return clk_pxa_cken_init(pxa27x_clocks, ARRAY_SIZE(pxa27x_clocks));
 }
 postcore_initcall(pxa27x_clocks_init);
+
+static void __init pxa27x_dt_clocks_init(struct device_node *np)
+{
+       pxa27x_clocks_init();
+       clk_pxa_dt_common_init(np);
+}
+CLK_OF_DECLARE(pxa_clks, "marvell,pxa270-clocks", pxa27x_dt_clocks_init);
index b823bc3b625067c3b64b32b79a0b8b3e17541096..60873a7f45d94b3687bb342b86dd7bbfb9db67df 100644 (file)
@@ -141,7 +141,7 @@ struct pll_freq_tbl *find_freq(const struct pll_freq_tbl *f, unsigned long rate)
 
 static long
 clk_pll_determine_rate(struct clk_hw *hw, unsigned long rate,
-                      unsigned long *p_rate, struct clk **p)
+                      unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_pll *pll = to_clk_pll(hw);
        const struct pll_freq_tbl *f;
index b6e6959e89aafed9248ba6ddd882e0ece7a4deb1..0b93972c8807f11ef5e27bf6eb9f14e85b9f1055 100644 (file)
@@ -368,16 +368,17 @@ clk_dyn_rcg_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 
 static long _freq_tbl_determine_rate(struct clk_hw *hw,
                const struct freq_tbl *f, unsigned long rate,
-               unsigned long *p_rate, struct clk **p)
+               unsigned long *p_rate, struct clk_hw **p_hw)
 {
        unsigned long clk_flags;
+       struct clk *p;
 
        f = qcom_find_freq(f, rate);
        if (!f)
                return -EINVAL;
 
        clk_flags = __clk_get_flags(hw->clk);
-       *p = clk_get_parent_by_index(hw->clk, f->src);
+       p = clk_get_parent_by_index(hw->clk, f->src);
        if (clk_flags & CLK_SET_RATE_PARENT) {
                rate = rate * f->pre_div;
                if (f->n) {
@@ -387,15 +388,16 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
                        rate = tmp;
                }
        } else {
-               rate =  __clk_get_rate(*p);
+               rate =  __clk_get_rate(p);
        }
+       *p_hw = __clk_get_hw(p);
        *p_rate = rate;
 
        return f->freq;
 }
 
 static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
-               unsigned long *p_rate, struct clk **p)
+               unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg *rcg = to_clk_rcg(hw);
 
@@ -403,7 +405,7 @@ static long clk_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
 }
 
 static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
-               unsigned long *p_rate, struct clk **p)
+               unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_dyn_rcg *rcg = to_clk_dyn_rcg(hw);
 
@@ -411,13 +413,15 @@ static long clk_dyn_rcg_determine_rate(struct clk_hw *hw, unsigned long rate,
 }
 
 static long clk_rcg_bypass_determine_rate(struct clk_hw *hw, unsigned long rate,
-               unsigned long *p_rate, struct clk **p)
+               unsigned long *p_rate, struct clk_hw **p_hw)
 {
        struct clk_rcg *rcg = to_clk_rcg(hw);
        const struct freq_tbl *f = rcg->freq_tbl;
+       struct clk *p;
 
-       *p = clk_get_parent_by_index(hw->clk, f->src);
-       *p_rate = __clk_round_rate(*p, rate);
+       p = clk_get_parent_by_index(hw->clk, f->src);
+       *p_hw = __clk_get_hw(p);
+       *p_rate = __clk_round_rate(p, rate);
 
        return *p_rate;
 }
index cfa9eb4fe9ca60c1420e7821f7d927e1fc82fb10..08b8b3729f539ee769f15d1f474e8c3718d1e640 100644 (file)
@@ -175,16 +175,17 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
 
 static long _freq_tbl_determine_rate(struct clk_hw *hw,
                const struct freq_tbl *f, unsigned long rate,
-               unsigned long *p_rate, struct clk **p)
+               unsigned long *p_rate, struct clk_hw **p_hw)
 {
        unsigned long clk_flags;
+       struct clk *p;
 
        f = qcom_find_freq(f, rate);
        if (!f)
                return -EINVAL;
 
        clk_flags = __clk_get_flags(hw->clk);
-       *p = clk_get_parent_by_index(hw->clk, f->src);
+       p = clk_get_parent_by_index(hw->clk, f->src);
        if (clk_flags & CLK_SET_RATE_PARENT) {
                if (f->pre_div) {
                        rate /= 2;
@@ -198,15 +199,16 @@ static long _freq_tbl_determine_rate(struct clk_hw *hw,
                        rate = tmp;
                }
        } else {
-               rate =  __clk_get_rate(*p);
+               rate =  __clk_get_rate(p);
        }
+       *p_hw = __clk_get_hw(p);
        *p_rate = rate;
 
        return f->freq;
 }
 
 static long clk_rcg2_determine_rate(struct clk_hw *hw, unsigned long rate,
-               unsigned long *p_rate, struct clk **p)
+               unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
 
@@ -359,7 +361,7 @@ static int clk_edp_pixel_set_rate_and_parent(struct clk_hw *hw,
 }
 
 static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
-                                unsigned long *p_rate, struct clk **p)
+                                unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        const struct freq_tbl *f = rcg->freq_tbl;
@@ -371,7 +373,7 @@ static long clk_edp_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
        u32 hid_div;
 
        /* Force the correct parent */
-       *p = clk_get_parent_by_index(hw->clk, f->src);
+       *p = __clk_get_hw(clk_get_parent_by_index(hw->clk, f->src));
 
        if (src_rate == 810000000)
                frac = frac_table_810m;
@@ -410,18 +412,20 @@ const struct clk_ops clk_edp_pixel_ops = {
 EXPORT_SYMBOL_GPL(clk_edp_pixel_ops);
 
 static long clk_byte_determine_rate(struct clk_hw *hw, unsigned long rate,
-                        unsigned long *p_rate, struct clk **p)
+                        unsigned long *p_rate, struct clk_hw **p_hw)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        const struct freq_tbl *f = rcg->freq_tbl;
        unsigned long parent_rate, div;
        u32 mask = BIT(rcg->hid_width) - 1;
+       struct clk *p;
 
        if (rate == 0)
                return -EINVAL;
 
-       *p = clk_get_parent_by_index(hw->clk, f->src);
-       *p_rate = parent_rate = __clk_round_rate(*p, rate);
+       p = clk_get_parent_by_index(hw->clk, f->src);
+       *p_hw = __clk_get_hw(p);
+       *p_rate = parent_rate = __clk_round_rate(p, rate);
 
        div = DIV_ROUND_UP((2 * parent_rate), rate) - 1;
        div = min_t(u32, div, mask);
@@ -472,14 +476,16 @@ static const struct frac_entry frac_table_pixel[] = {
 };
 
 static long clk_pixel_determine_rate(struct clk_hw *hw, unsigned long rate,
-                                unsigned long *p_rate, struct clk **p)
+                                unsigned long *p_rate, struct clk_hw **p)
 {
        struct clk_rcg2 *rcg = to_clk_rcg2(hw);
        unsigned long request, src_rate;
        int delta = 100000;
        const struct freq_tbl *f = rcg->freq_tbl;
        const struct frac_entry *frac = frac_table_pixel;
-       struct clk *parent = *p = clk_get_parent_by_index(hw->clk, f->src);
+       struct clk *parent = clk_get_parent_by_index(hw->clk, f->src);
+
+       *p = __clk_get_hw(parent);
 
        for (; frac->num; frac++) {
                request = (rate * frac->den) / frac->num;
index bd8514d63634bdb78ae0e75f801be478265f5f91..2714097f90db1138b1f71771461b15932b826cce 100644 (file)
@@ -6,6 +6,7 @@ obj-y   += clk-rockchip.o
 obj-y  += clk.o
 obj-y  += clk-pll.o
 obj-y  += clk-cpu.o
+obj-y  += clk-mmc-phase.o
 obj-$(CONFIG_RESET_CONTROLLER) += softrst.o
 
 obj-y  += clk-rk3188.o
diff --git a/drivers/clk/rockchip/clk-mmc-phase.c b/drivers/clk/rockchip/clk-mmc-phase.c
new file mode 100644 (file)
index 0000000..c842e3b
--- /dev/null
@@ -0,0 +1,154 @@
+/*
+ * Copyright 2014 Google, Inc
+ * Author: Alexandru M Stan <amstan@chromium.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/clk-provider.h>
+#include "clk.h"
+
+struct rockchip_mmc_clock {
+       struct clk_hw   hw;
+       void __iomem    *reg;
+       int             id;
+       int             shift;
+};
+
+#define to_mmc_clock(_hw) container_of(_hw, struct rockchip_mmc_clock, hw)
+
+#define RK3288_MMC_CLKGEN_DIV 2
+
+static unsigned long rockchip_mmc_recalc(struct clk_hw *hw,
+                                        unsigned long parent_rate)
+{
+       return parent_rate / RK3288_MMC_CLKGEN_DIV;
+}
+
+#define ROCKCHIP_MMC_DELAY_SEL BIT(10)
+#define ROCKCHIP_MMC_DEGREE_MASK 0x3
+#define ROCKCHIP_MMC_DELAYNUM_OFFSET 2
+#define ROCKCHIP_MMC_DELAYNUM_MASK (0xff << ROCKCHIP_MMC_DELAYNUM_OFFSET)
+
+#define PSECS_PER_SEC 1000000000000LL
+
+/*
+ * Each fine delay is between 40ps-80ps. Assume each fine delay is 60ps to
+ * simplify calculations. So 45degs could be anywhere between 33deg and 66deg.
+ */
+#define ROCKCHIP_MMC_DELAY_ELEMENT_PSEC 60
+
+static int rockchip_mmc_get_phase(struct clk_hw *hw)
+{
+       struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
+       unsigned long rate = clk_get_rate(hw->clk);
+       u32 raw_value;
+       u16 degrees;
+       u32 delay_num = 0;
+
+       raw_value = readl(mmc_clock->reg) >> (mmc_clock->shift);
+
+       degrees = (raw_value & ROCKCHIP_MMC_DEGREE_MASK) * 90;
+
+       if (raw_value & ROCKCHIP_MMC_DELAY_SEL) {
+               /* degrees/delaynum * 10000 */
+               unsigned long factor = (ROCKCHIP_MMC_DELAY_ELEMENT_PSEC / 10) *
+                                       36 * (rate / 1000000);
+
+               delay_num = (raw_value & ROCKCHIP_MMC_DELAYNUM_MASK);
+               delay_num >>= ROCKCHIP_MMC_DELAYNUM_OFFSET;
+               degrees += delay_num * factor / 10000;
+       }
+
+       return degrees % 360;
+}
+
+static int rockchip_mmc_set_phase(struct clk_hw *hw, int degrees)
+{
+       struct rockchip_mmc_clock *mmc_clock = to_mmc_clock(hw);
+       unsigned long rate = clk_get_rate(hw->clk);
+       u8 nineties, remainder;
+       u8 delay_num;
+       u32 raw_value;
+       u64 delay;
+
+       /* allow 22 to be 22.5 */
+       degrees++;
+       /* floor to 22.5 increment */
+       degrees -= ((degrees) * 10 % 225) / 10;
+
+       nineties = degrees / 90;
+       /* 22.5 multiples */
+       remainder = (degrees % 90) / 22;
+
+       delay = PSECS_PER_SEC;
+       do_div(delay, rate);
+       /* / 360 / 22.5 */
+       do_div(delay, 16);
+       do_div(delay, ROCKCHIP_MMC_DELAY_ELEMENT_PSEC);
+
+       delay *= remainder;
+       delay_num = (u8) min(delay, 255ULL);
+
+       raw_value = delay_num ? ROCKCHIP_MMC_DELAY_SEL : 0;
+       raw_value |= delay_num << ROCKCHIP_MMC_DELAYNUM_OFFSET;
+       raw_value |= nineties;
+       writel(HIWORD_UPDATE(raw_value, 0x07ff, mmc_clock->shift), mmc_clock->reg);
+
+       pr_debug("%s->set_phase(%d) delay_nums=%u reg[0x%p]=0x%03x actual_degrees=%d\n",
+               __clk_get_name(hw->clk), degrees, delay_num,
+               mmc_clock->reg, raw_value>>(mmc_clock->shift),
+               rockchip_mmc_get_phase(hw)
+       );
+
+       return 0;
+}
+
+static const struct clk_ops rockchip_mmc_clk_ops = {
+       .recalc_rate    = rockchip_mmc_recalc,
+       .get_phase      = rockchip_mmc_get_phase,
+       .set_phase      = rockchip_mmc_set_phase,
+};
+
+struct clk *rockchip_clk_register_mmc(const char *name,
+                               const char **parent_names, u8 num_parents,
+                               void __iomem *reg, int shift)
+{
+       struct clk_init_data init;
+       struct rockchip_mmc_clock *mmc_clock;
+       struct clk *clk;
+
+       mmc_clock = kmalloc(sizeof(*mmc_clock), GFP_KERNEL);
+       if (!mmc_clock)
+               return NULL;
+
+       init.num_parents = num_parents;
+       init.parent_names = parent_names;
+       init.ops = &rockchip_mmc_clk_ops;
+
+       mmc_clock->hw.init = &init;
+       mmc_clock->reg = reg;
+       mmc_clock->shift = shift;
+
+       if (name)
+               init.name = name;
+
+       clk = clk_register(NULL, &mmc_clock->hw);
+       if (IS_ERR(clk))
+               goto err_free;
+
+       return clk;
+
+err_free:
+       kfree(mmc_clock);
+       return NULL;
+}
index a3e886a38480a75060d310bc128944e54169c80d..f8d3baf275b211623efd553d7b7fdf23c609d7f0 100644 (file)
@@ -39,6 +39,7 @@ struct rockchip_clk_pll {
        int                     lock_offset;
        unsigned int            lock_shift;
        enum rockchip_pll_type  type;
+       u8                      flags;
        const struct rockchip_pll_rate_table *rate_table;
        unsigned int            rate_count;
        spinlock_t              *lock;
@@ -257,6 +258,55 @@ static int rockchip_rk3066_pll_is_enabled(struct clk_hw *hw)
        return !(pllcon & RK3066_PLLCON3_PWRDOWN);
 }
 
+static void rockchip_rk3066_pll_init(struct clk_hw *hw)
+{
+       struct rockchip_clk_pll *pll = to_rockchip_clk_pll(hw);
+       const struct rockchip_pll_rate_table *rate;
+       unsigned int nf, nr, no, bwadj;
+       unsigned long drate;
+       u32 pllcon;
+
+       if (!(pll->flags & ROCKCHIP_PLL_SYNC_RATE))
+               return;
+
+       drate = __clk_get_rate(hw->clk);
+       rate = rockchip_get_pll_settings(pll, drate);
+
+       /* when no rate setting for the current rate, rely on clk_set_rate */
+       if (!rate)
+               return;
+
+       pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(0));
+       nr = ((pllcon >> RK3066_PLLCON0_NR_SHIFT) & RK3066_PLLCON0_NR_MASK) + 1;
+       no = ((pllcon >> RK3066_PLLCON0_OD_SHIFT) & RK3066_PLLCON0_OD_MASK) + 1;
+
+       pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(1));
+       nf = ((pllcon >> RK3066_PLLCON1_NF_SHIFT) & RK3066_PLLCON1_NF_MASK) + 1;
+
+       pllcon = readl_relaxed(pll->reg_base + RK3066_PLLCON(2));
+       bwadj = (pllcon >> RK3066_PLLCON2_BWADJ_SHIFT) & RK3066_PLLCON2_BWADJ_MASK;
+
+       pr_debug("%s: pll %s@%lu: nr (%d:%d); no (%d:%d); nf(%d:%d), bwadj(%d:%d)\n",
+                __func__, __clk_get_name(hw->clk), drate, rate->nr, nr,
+               rate->no, no, rate->nf, nf, rate->bwadj, bwadj);
+       if (rate->nr != nr || rate->no != no || rate->nf != nf
+                                            || rate->bwadj != bwadj) {
+               struct clk *parent = __clk_get_parent(hw->clk);
+               unsigned long prate;
+
+               if (!parent) {
+                       pr_warn("%s: parent of %s not available\n",
+                               __func__, __clk_get_name(hw->clk));
+                       return;
+               }
+
+               pr_debug("%s: pll %s: rate params do not match rate table, adjusting\n",
+                        __func__, __clk_get_name(hw->clk));
+               prate = __clk_get_rate(parent);
+               rockchip_rk3066_pll_set_rate(hw, drate, prate);
+       }
+}
+
 static const struct clk_ops rockchip_rk3066_pll_clk_norate_ops = {
        .recalc_rate = rockchip_rk3066_pll_recalc_rate,
        .enable = rockchip_rk3066_pll_enable,
@@ -271,6 +321,7 @@ static const struct clk_ops rockchip_rk3066_pll_clk_ops = {
        .enable = rockchip_rk3066_pll_enable,
        .disable = rockchip_rk3066_pll_disable,
        .is_enabled = rockchip_rk3066_pll_is_enabled,
+       .init = rockchip_rk3066_pll_init,
 };
 
 /*
@@ -282,7 +333,7 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
                void __iomem *base, int con_offset, int grf_lock_offset,
                int lock_shift, int mode_offset, int mode_shift,
                struct rockchip_pll_rate_table *rate_table,
-               spinlock_t *lock)
+               u8 clk_pll_flags, spinlock_t *lock)
 {
        const char *pll_parents[3];
        struct clk_init_data init;
@@ -345,8 +396,22 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
        pll->reg_base = base + con_offset;
        pll->lock_offset = grf_lock_offset;
        pll->lock_shift = lock_shift;
+       pll->flags = clk_pll_flags;
        pll->lock = lock;
 
+       /* create the mux on top of the real pll */
+       pll->pll_mux_ops = &clk_mux_ops;
+       pll_mux = &pll->pll_mux;
+       pll_mux->reg = base + mode_offset;
+       pll_mux->shift = mode_shift;
+       pll_mux->mask = PLL_MODE_MASK;
+       pll_mux->flags = 0;
+       pll_mux->lock = lock;
+       pll_mux->hw.init = &init;
+
+       if (pll_type == pll_rk3066)
+               pll_mux->flags |= CLK_MUX_HIWORD_MASK;
+
        pll_clk = clk_register(NULL, &pll->hw);
        if (IS_ERR(pll_clk)) {
                pr_err("%s: failed to register pll clock %s : %ld\n",
@@ -355,10 +420,6 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
                goto err_pll;
        }
 
-       /* create the mux on top of the real pll */
-       pll->pll_mux_ops = &clk_mux_ops;
-       pll_mux = &pll->pll_mux;
-
        /* the actual muxing is xin24m, pll-output, xin32k */
        pll_parents[0] = parent_names[0];
        pll_parents[1] = pll_name;
@@ -370,16 +431,6 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
        init.parent_names = pll_parents;
        init.num_parents = ARRAY_SIZE(pll_parents);
 
-       pll_mux->reg = base + mode_offset;
-       pll_mux->shift = mode_shift;
-       pll_mux->mask = PLL_MODE_MASK;
-       pll_mux->flags = 0;
-       pll_mux->lock = lock;
-       pll_mux->hw.init = &init;
-
-       if (pll_type == pll_rk3066)
-               pll_mux->flags |= CLK_MUX_HIWORD_MASK;
-
        mux_clk = clk_register(NULL, &pll_mux->hw);
        if (IS_ERR(mux_clk))
                goto err_mux;
index beed49c79126bb1412cf7b217586aa8c0b6e532d..c54078960847c91f6f499ecb8f26924338bb2ab4 100644 (file)
@@ -212,13 +212,13 @@ PNAME(mux_sclk_macref_p)  = { "mac_src", "ext_rmii" };
 
 static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
        [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
-                    RK2928_MODE_CON, 0, 6, rk3188_pll_rates),
+                    RK2928_MODE_CON, 0, 6, 0, rk3188_pll_rates),
        [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4),
-                    RK2928_MODE_CON, 4, 5, NULL),
+                    RK2928_MODE_CON, 4, 5, 0, NULL),
        [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8),
-                    RK2928_MODE_CON, 8, 7, rk3188_pll_rates),
+                    RK2928_MODE_CON, 8, 7, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
        [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12),
-                    RK2928_MODE_CON, 12, 8, rk3188_pll_rates),
+                    RK2928_MODE_CON, 12, 8, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
 };
 
 #define MFLAGS CLK_MUX_HIWORD_MASK
@@ -257,9 +257,9 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
        GATE(0, "hclk_vdpu", "aclk_vdpu", 0,
                        RK2928_CLKGATE_CON(3), 12, GFLAGS),
 
-       GATE(0, "gpll_ddr", "gpll", 0,
+       GATE(0, "gpll_ddr", "gpll", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(1), 7, GFLAGS),
-       COMPOSITE(0, "ddrphy", mux_ddrphy_p, 0,
+       COMPOSITE(0, "ddrphy", mux_ddrphy_p, CLK_IGNORE_UNUSED,
                        RK2928_CLKSEL_CON(26), 8, 1, MFLAGS, 0, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
                        RK2928_CLKGATE_CON(0), 2, GFLAGS),
 
@@ -270,10 +270,10 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
                        RK2928_CLKGATE_CON(0), 6, GFLAGS),
        GATE(0, "pclk_cpu", "pclk_cpu_pre", 0,
                        RK2928_CLKGATE_CON(0), 5, GFLAGS),
-       GATE(0, "hclk_cpu", "hclk_cpu_pre", 0,
+       GATE(0, "hclk_cpu", "hclk_cpu_pre", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(0), 4, GFLAGS),
 
-       COMPOSITE(0, "aclk_lcdc0_pre", mux_pll_src_cpll_gpll_p, 0,
+       COMPOSITE(0, "aclk_lcdc0_pre", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
                        RK2928_CLKSEL_CON(31), 7, 1, MFLAGS, 0, 5, DFLAGS,
                        RK2928_CLKGATE_CON(3), 0, GFLAGS),
        COMPOSITE(0, "aclk_lcdc1_pre", mux_pll_src_cpll_gpll_p, 0,
@@ -304,9 +304,9 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
         * the 480m are generated inside the usb block from these clocks,
         * but they are also a source for the hsicphy clock.
         */
-       GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", 0,
+       GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(1), 5, GFLAGS),
-       GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", 0,
+       GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(1), 6, GFLAGS),
 
        COMPOSITE(0, "mac_src", mux_mac_p, 0,
@@ -320,9 +320,9 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
        COMPOSITE(0, "hsadc_src", mux_pll_src_gpll_cpll_p, 0,
                        RK2928_CLKSEL_CON(22), 0, 1, MFLAGS, 8, 8, DFLAGS,
                        RK2928_CLKGATE_CON(2), 6, GFLAGS),
-       COMPOSITE_FRAC(0, "hsadc_frac", "hsadc_src",
+       COMPOSITE_FRAC(0, "hsadc_frac", "hsadc_src", 0,
                        RK2928_CLKSEL_CON(23), 0,
-                       RK2928_CLKGATE_CON(2), 7, 0, GFLAGS),
+                       RK2928_CLKGATE_CON(2), 7, GFLAGS),
        MUX(SCLK_HSADC, "sclk_hsadc", mux_sclk_hsadc_p, 0,
                        RK2928_CLKSEL_CON(22), 4, 2, MFLAGS),
 
@@ -330,6 +330,15 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
                        RK2928_CLKSEL_CON(24), 8, 8, DFLAGS,
                        RK2928_CLKGATE_CON(2), 8, GFLAGS),
 
+       COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
+                       RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
+                       RK2928_CLKGATE_CON(0), 13, GFLAGS),
+       COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
+                       RK2928_CLKSEL_CON(9), 0,
+                       RK2928_CLKGATE_CON(0), 14, GFLAGS),
+       MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
+                       RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
+
        /*
         * Clock-Architecture Diagram 4
         */
@@ -399,8 +408,8 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
 
        /* aclk_cpu gates */
        GATE(ACLK_DMA1, "aclk_dma1", "aclk_cpu", 0, RK2928_CLKGATE_CON(5), 0, GFLAGS),
-       GATE(0, "aclk_intmem", "aclk_cpu", 0, RK2928_CLKGATE_CON(4), 12, GFLAGS),
-       GATE(0, "aclk_strc_sys", "aclk_cpu", 0, RK2928_CLKGATE_CON(4), 10, GFLAGS),
+       GATE(0, "aclk_intmem", "aclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 12, GFLAGS),
+       GATE(0, "aclk_strc_sys", "aclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 10, GFLAGS),
 
        /* hclk_cpu gates */
        GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 0, RK2928_CLKGATE_CON(5), 6, GFLAGS),
@@ -410,14 +419,14 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
        /* hclk_ahb2apb is part of a clk branch */
        GATE(0, "hclk_vio_bus", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 12, GFLAGS),
        GATE(HCLK_LCDC0, "hclk_lcdc0", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 1, GFLAGS),
-       GATE(HCLK_LCDC1, "hclk_lcdc1", "aclk_cpu", 0, RK2928_CLKGATE_CON(6), 2, GFLAGS),
+       GATE(HCLK_LCDC1, "hclk_lcdc1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 2, GFLAGS),
        GATE(HCLK_CIF0, "hclk_cif0", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 4, GFLAGS),
        GATE(HCLK_IPP, "hclk_ipp", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 9, GFLAGS),
        GATE(HCLK_RGA, "hclk_rga", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 10, GFLAGS),
 
        /* hclk_peri gates */
-       GATE(0, "hclk_peri_axi_matrix", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 0, GFLAGS),
-       GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 6, GFLAGS),
+       GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS),
+       GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 6, GFLAGS),
        GATE(0, "hclk_emem_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 7, GFLAGS),
        GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS),
        GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS),
@@ -457,18 +466,18 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
        GATE(0, "pclk_ddrupctl", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 7, GFLAGS),
        GATE(0, "pclk_ddrpubl", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 6, GFLAGS),
        GATE(0, "pclk_dbg", "pclk_cpu", 0, RK2928_CLKGATE_CON(9), 1, GFLAGS),
-       GATE(PCLK_GRF, "pclk_grf", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 4, GFLAGS),
-       GATE(PCLK_PMU, "pclk_pmu", "pclk_cpu", 0, RK2928_CLKGATE_CON(5), 5, GFLAGS),
+       GATE(PCLK_GRF, "pclk_grf", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 4, GFLAGS),
+       GATE(PCLK_PMU, "pclk_pmu", "pclk_cpu", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 5, GFLAGS),
 
        /* aclk_peri */
        GATE(ACLK_DMA2, "aclk_dma2", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 1, GFLAGS),
        GATE(ACLK_SMC, "aclk_smc", "aclk_peri", 0, RK2928_CLKGATE_CON(5), 8, GFLAGS),
-       GATE(0, "aclk_peri_niu", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 4, GFLAGS),
-       GATE(0, "aclk_cpu_peri", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 2, GFLAGS),
-       GATE(0, "aclk_peri_axi_matrix", "aclk_peri", 0, RK2928_CLKGATE_CON(4), 3, GFLAGS),
+       GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 4, GFLAGS),
+       GATE(0, "aclk_cpu_peri", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 2, GFLAGS),
+       GATE(0, "aclk_peri_axi_matrix", "aclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 3, GFLAGS),
 
        /* pclk_peri gates */
-       GATE(0, "pclk_peri_axi_matrix", "pclk_peri", 0, RK2928_CLKGATE_CON(4), 1, GFLAGS),
+       GATE(0, "pclk_peri_axi_matrix", "pclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 1, GFLAGS),
        GATE(PCLK_PWM23, "pclk_pwm23", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 11, GFLAGS),
        GATE(PCLK_WDT, "pclk_wdt", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 15, GFLAGS),
        GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK2928_CLKGATE_CON(7), 12, GFLAGS),
@@ -511,7 +520,7 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
                                                            | CLK_DIVIDER_READ_ONLY,
                        RK2928_CLKGATE_CON(4), 9, GFLAGS),
 
-       GATE(CORE_L2C, "core_l2c", "aclk_cpu", 0,
+       GATE(CORE_L2C, "core_l2c", "aclk_cpu", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(9), 4, GFLAGS),
 
        COMPOSITE(0, "aclk_peri_pre", mux_pll_src_gpll_cpll_p, 0,
@@ -577,14 +586,6 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
                        RK2928_CLKGATE_CON(0), 12, GFLAGS),
        MUX(SCLK_I2S2, "sclk_i2s2", mux_sclk_i2s2_p, 0,
                        RK2928_CLKSEL_CON(4), 8, 2, MFLAGS),
-       COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
-                       RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
-                       RK2928_CLKGATE_CON(0), 13, GFLAGS),
-       COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
-                       RK2928_CLKSEL_CON(9), 0,
-                       RK2928_CLKGATE_CON(0), 14, GFLAGS),
-       MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
-                       RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
 
        GATE(HCLK_I2S1, "hclk_i2s1", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
        GATE(HCLK_I2S2, "hclk_i2s2", "hclk_cpu", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
@@ -618,7 +619,7 @@ PNAME(mux_hsicphy_p)                = { "sclk_otgphy0", "sclk_otgphy1",
                                    "gpll", "cpll" };
 
 static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
-       COMPOSITE_NOMUX_DIVTBL(0, "aclk_core", "armclk", 0,
+       COMPOSITE_NOMUX_DIVTBL(0, "aclk_core", "armclk", CLK_IGNORE_UNUSED,
                        RK2928_CLKSEL_CON(1), 3, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
                        div_rk3188_aclk_core_t, RK2928_CLKGATE_CON(0), 7, GFLAGS),
 
@@ -633,7 +634,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
                        RK2928_CLKSEL_CON(1), 14, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
                        RK2928_CLKGATE_CON(4), 9, GFLAGS),
 
-       GATE(CORE_L2C, "core_l2c", "armclk", 0,
+       GATE(CORE_L2C, "core_l2c", "armclk", CLK_IGNORE_UNUSED,
                        RK2928_CLKGATE_CON(9), 4, GFLAGS),
 
        COMPOSITE(0, "aclk_peri_pre", mux_pll_src_cpll_gpll_p, 0,
@@ -663,7 +664,7 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
                        RK2928_CLKSEL_CON(30), 0, 2, DFLAGS,
                        RK2928_CLKGATE_CON(3), 6, GFLAGS),
        DIV(0, "sclk_hsicphy_12m", "sclk_hsicphy_480m", 0,
-                       RK2928_CLKGATE_CON(11), 8, 6, DFLAGS),
+                       RK2928_CLKSEL_CON(11), 8, 6, DFLAGS),
 
        MUX(0, "i2s_src", mux_pll_src_gpll_cpll_p, 0,
                        RK2928_CLKSEL_CON(2), 15, 1, MFLAGS),
@@ -675,14 +676,6 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
                        RK2928_CLKGATE_CON(0), 10, GFLAGS),
        MUX(SCLK_I2S0, "sclk_i2s0", mux_sclk_i2s0_p, 0,
                        RK2928_CLKSEL_CON(3), 8, 2, MFLAGS),
-       COMPOSITE_NOMUX(0, "spdif_pre", "i2s_src", 0,
-                       RK2928_CLKSEL_CON(5), 0, 7, DFLAGS,
-                       RK2928_CLKGATE_CON(13), 13, GFLAGS),
-       COMPOSITE_FRAC(0, "spdif_frac", "spdif_pll", 0,
-                       RK2928_CLKSEL_CON(9), 0,
-                       RK2928_CLKGATE_CON(0), 14, GFLAGS),
-       MUX(SCLK_SPDIF, "sclk_spdif", mux_sclk_spdif_p, 0,
-                       RK2928_CLKSEL_CON(5), 8, 2, MFLAGS),
 
        GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
        GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS),
index 23278291da448de806a9a8929fe300c6952aafec..ac6be7c0132d1e27cfad2f95169212b70f3d31d2 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
+#include <linux/syscore_ops.h>
 #include <dt-bindings/clock/rk3288-cru.h>
 #include "clk.h"
 
@@ -83,11 +84,13 @@ struct rockchip_pll_rate_table rk3288_pll_rates[] = {
        RK3066_PLL_RATE( 742500000, 8, 495, 2),
        RK3066_PLL_RATE( 696000000, 1, 58, 2),
        RK3066_PLL_RATE( 600000000, 1, 50, 2),
-       RK3066_PLL_RATE( 594000000, 2, 198, 4),
+       RK3066_PLL_RATE_BWADJ(594000000, 1, 198, 8, 1),
        RK3066_PLL_RATE( 552000000, 1, 46, 2),
        RK3066_PLL_RATE( 504000000, 1, 84, 4),
+       RK3066_PLL_RATE( 500000000, 3, 125, 2),
        RK3066_PLL_RATE( 456000000, 1, 76, 4),
        RK3066_PLL_RATE( 408000000, 1, 68, 4),
+       RK3066_PLL_RATE( 400000000, 3, 100, 2),
        RK3066_PLL_RATE( 384000000, 2, 128, 4),
        RK3066_PLL_RATE( 360000000, 1, 60, 4),
        RK3066_PLL_RATE( 312000000, 1, 52, 4),
@@ -173,14 +176,14 @@ PNAME(mux_aclk_cpu_src_p) = { "cpll_aclk_cpu", "gpll_aclk_cpu" };
 PNAME(mux_pll_src_cpll_gpll_p)         = { "cpll", "gpll" };
 PNAME(mux_pll_src_npll_cpll_gpll_p)    = { "npll", "cpll", "gpll" };
 PNAME(mux_pll_src_cpll_gpll_npll_p)    = { "cpll", "gpll", "npll" };
-PNAME(mux_pll_src_cpll_gpll_usb480m_p) = { "cpll", "gpll", "usb480m" };
+PNAME(mux_pll_src_cpll_gpll_usb480m_p) = { "cpll", "gpll", "usbphy480m_src" };
+PNAME(mux_pll_src_cpll_gll_usb_npll_p) = { "cpll", "gpll", "usbphy480m_src", "npll" };
 
 PNAME(mux_mmc_src_p)   = { "cpll", "gpll", "xin24m", "xin24m" };
 PNAME(mux_i2s_pre_p)   = { "i2s_src", "i2s_frac", "ext_i2s", "xin12m" };
 PNAME(mux_i2s_clkout_p)        = { "i2s_pre", "xin12m" };
 PNAME(mux_spdif_p)     = { "spdif_pre", "spdif_frac", "xin12m" };
 PNAME(mux_spdif_8ch_p) = { "spdif_8ch_pre", "spdif_8ch_frac", "xin12m" };
-PNAME(mux_uart0_pll_p) = { "cpll", "gpll", "usbphy_480m_src", "npll" };
 PNAME(mux_uart0_p)     = { "uart0_src", "uart0_frac", "xin24m" };
 PNAME(mux_uart1_p)     = { "uart1_src", "uart1_frac", "xin24m" };
 PNAME(mux_uart2_p)     = { "uart2_src", "uart2_frac", "xin24m" };
@@ -192,22 +195,22 @@ PNAME(mux_hsadcout_p)     = { "hsadc_src", "ext_hsadc" };
 PNAME(mux_edp_24m_p)   = { "ext_edp_24m", "xin24m" };
 PNAME(mux_tspout_p)    = { "cpll", "gpll", "npll", "xin27m" };
 
-PNAME(mux_usbphy480m_p)                = { "sclk_otgphy0", "sclk_otgphy1",
-                                   "sclk_otgphy2" };
+PNAME(mux_usbphy480m_p)                = { "sclk_otgphy1", "sclk_otgphy2",
+                                   "sclk_otgphy0" };
 PNAME(mux_hsicphy480m_p)       = { "cpll", "gpll", "usbphy480m_src" };
 PNAME(mux_hsicphy12m_p)                = { "hsicphy12m_xin12m", "hsicphy12m_usbphy" };
 
 static struct rockchip_pll_clock rk3288_pll_clks[] __initdata = {
        [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK3288_PLL_CON(0),
-                    RK3288_MODE_CON, 0, 6, rk3288_pll_rates),
+                    RK3288_MODE_CON, 0, 6, 0, rk3288_pll_rates),
        [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK3288_PLL_CON(4),
-                    RK3288_MODE_CON, 4, 5, NULL),
+                    RK3288_MODE_CON, 4, 5, 0, NULL),
        [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK3288_PLL_CON(8),
-                    RK3288_MODE_CON, 8, 7, rk3288_pll_rates),
+                    RK3288_MODE_CON, 8, 7, ROCKCHIP_PLL_SYNC_RATE, rk3288_pll_rates),
        [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK3288_PLL_CON(12),
-                    RK3288_MODE_CON, 12, 8, rk3288_pll_rates),
+                    RK3288_MODE_CON, 12, 8, ROCKCHIP_PLL_SYNC_RATE, rk3288_pll_rates),
        [npll] = PLL(pll_rk3066, PLL_NPLL, "npll",  mux_pll_p, 0, RK3288_PLL_CON(16),
-                    RK3288_MODE_CON, 14, 9, rk3288_pll_rates),
+                    RK3288_MODE_CON, 14, 9, ROCKCHIP_PLL_SYNC_RATE, rk3288_pll_rates),
 };
 
 static struct clk_div_table div_hclk_cpu_t[] = {
@@ -226,67 +229,67 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
         * Clock-Architecture Diagram 1
         */
 
-       GATE(0, "apll_core", "apll", 0,
+       GATE(0, "apll_core", "apll", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(0), 1, GFLAGS),
-       GATE(0, "gpll_core", "gpll", 0,
+       GATE(0, "gpll_core", "gpll", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(0), 2, GFLAGS),
 
-       COMPOSITE_NOMUX(0, "armcore0", "armclk", 0,
+       COMPOSITE_NOMUX(0, "armcore0", "armclk", CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(36), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
                        RK3288_CLKGATE_CON(12), 0, GFLAGS),
-       COMPOSITE_NOMUX(0, "armcore1", "armclk", 0,
+       COMPOSITE_NOMUX(0, "armcore1", "armclk", CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(36), 4, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
                        RK3288_CLKGATE_CON(12), 1, GFLAGS),
-       COMPOSITE_NOMUX(0, "armcore2", "armclk", 0,
+       COMPOSITE_NOMUX(0, "armcore2", "armclk", CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(36), 8, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
                        RK3288_CLKGATE_CON(12), 2, GFLAGS),
-       COMPOSITE_NOMUX(0, "armcore3", "armclk", 0,
+       COMPOSITE_NOMUX(0, "armcore3", "armclk", CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(36), 12, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
                        RK3288_CLKGATE_CON(12), 3, GFLAGS),
-       COMPOSITE_NOMUX(0, "l2ram", "armclk", 0,
+       COMPOSITE_NOMUX(0, "l2ram", "armclk", CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(37), 0, 3, DFLAGS | CLK_DIVIDER_READ_ONLY,
                        RK3288_CLKGATE_CON(12), 4, GFLAGS),
-       COMPOSITE_NOMUX(0, "aclk_core_m0", "armclk", 0,
+       COMPOSITE_NOMUX(0, "aclk_core_m0", "armclk", CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(0), 0, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
                        RK3288_CLKGATE_CON(12), 5, GFLAGS),
-       COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", 0,
+       COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
                        RK3288_CLKGATE_CON(12), 6, GFLAGS),
        COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
                        RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
                        RK3288_CLKGATE_CON(12), 7, GFLAGS),
-       COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", 0,
+       COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
                        RK3288_CLKGATE_CON(12), 8, GFLAGS),
        GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
                        RK3288_CLKGATE_CON(12), 9, GFLAGS),
-       GATE(0, "cs_dbg", "pclk_dbg_pre", 0,
+       GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(12), 10, GFLAGS),
        GATE(0, "pclk_core_niu", "pclk_dbg_pre", 0,
                        RK3288_CLKGATE_CON(12), 11, GFLAGS),
 
-       GATE(0, "dpll_ddr", "dpll", 0,
+       GATE(0, "dpll_ddr", "dpll", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(0), 8, GFLAGS),
        GATE(0, "gpll_ddr", "gpll", 0,
                        RK3288_CLKGATE_CON(0), 9, GFLAGS),
-       COMPOSITE_NOGATE(0, "ddrphy", mux_ddrphy_p, 0,
+       COMPOSITE_NOGATE(0, "ddrphy", mux_ddrphy_p, CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(26), 2, 1, MFLAGS, 0, 2,
                                        DFLAGS | CLK_DIVIDER_POWER_OF_TWO),
 
-       GATE(0, "gpll_aclk_cpu", "gpll", 0,
+       GATE(0, "gpll_aclk_cpu", "gpll", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(0), 10, GFLAGS),
-       GATE(0, "cpll_aclk_cpu", "cpll", 0,
+       GATE(0, "cpll_aclk_cpu", "cpll", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(0), 11, GFLAGS),
-       COMPOSITE_NOGATE(0, "aclk_cpu_src", mux_aclk_cpu_src_p, 0,
+       COMPOSITE_NOGATE(0, "aclk_cpu_src", mux_aclk_cpu_src_p, CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(1), 15, 1, MFLAGS, 3, 5, DFLAGS),
-       DIV(0, "aclk_cpu_pre", "aclk_cpu_src", 0,
+       DIV(0, "aclk_cpu_pre", "aclk_cpu_src", CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(1), 0, 3, DFLAGS),
-       GATE(ACLK_CPU, "aclk_cpu", "aclk_cpu_pre", 0,
+       GATE(ACLK_CPU, "aclk_cpu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(0), 3, GFLAGS),
-       COMPOSITE_NOMUX(PCLK_CPU, "pclk_cpu", "aclk_cpu_pre", 0,
+       COMPOSITE_NOMUX(PCLK_CPU, "pclk_cpu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(1), 12, 3, DFLAGS,
                        RK3288_CLKGATE_CON(0), 5, GFLAGS),
-       COMPOSITE_NOMUX_DIVTBL(HCLK_CPU, "hclk_cpu", "aclk_cpu_pre", 0,
+       COMPOSITE_NOMUX_DIVTBL(HCLK_CPU, "hclk_cpu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(1), 8, 2, DFLAGS, div_hclk_cpu_t,
                        RK3288_CLKGATE_CON(0), 4, GFLAGS),
        GATE(0, "c2c_host", "aclk_cpu_src", 0,
@@ -294,7 +297,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        COMPOSITE_NOMUX(0, "crypto", "aclk_cpu_pre", 0,
                        RK3288_CLKSEL_CON(26), 6, 2, DFLAGS,
                        RK3288_CLKGATE_CON(5), 4, GFLAGS),
-       GATE(0, "aclk_bus_2pmu", "aclk_cpu_pre", 0,
+       GATE(0, "aclk_bus_2pmu", "aclk_cpu_pre", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(0), 7, GFLAGS),
 
        COMPOSITE(0, "i2s_src", mux_pll_src_cpll_gpll_p, 0,
@@ -305,7 +308,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
                        RK3288_CLKGATE_CON(4), 2, GFLAGS),
        MUX(0, "i2s_pre", mux_i2s_pre_p, CLK_SET_RATE_PARENT,
                        RK3288_CLKSEL_CON(4), 8, 2, MFLAGS),
-       COMPOSITE_NODIV(0, "i2s0_clkout", mux_i2s_clkout_p, CLK_SET_RATE_PARENT,
+       COMPOSITE_NODIV(SCLK_I2S0_OUT, "i2s0_clkout", mux_i2s_clkout_p, 0,
                        RK3288_CLKSEL_CON(4), 12, 1, MFLAGS,
                        RK3288_CLKGATE_CON(4), 0, GFLAGS),
        GATE(SCLK_I2S0, "sclk_i2s0", "i2s_pre", CLK_SET_RATE_PARENT,
@@ -325,7 +328,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        COMPOSITE_NOMUX(0, "spdif_8ch_pre", "spdif_src", 0,
                        RK3288_CLKSEL_CON(40), 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(4), 7, GFLAGS),
-       COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_src", 0,
+       COMPOSITE_FRAC(0, "spdif_8ch_frac", "spdif_8ch_pre", 0,
                        RK3288_CLKSEL_CON(41), 0,
                        RK3288_CLKGATE_CON(4), 8, GFLAGS),
        COMPOSITE_NODIV(SCLK_SPDIF8CH, "sclk_spdif_8ch", mux_spdif_8ch_p, 0,
@@ -373,12 +376,12 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        GATE(HCLK_VCODEC, "hclk_vcodec", "hclk_vcodec_pre", 0,
                RK3288_CLKGATE_CON(9), 1, GFLAGS),
 
-       COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb480m_p, 0,
+       COMPOSITE(0, "aclk_vio0", mux_pll_src_cpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(31), 6, 2, MFLAGS, 0, 5, DFLAGS,
                        RK3288_CLKGATE_CON(3), 0, GFLAGS),
        DIV(0, "hclk_vio", "aclk_vio0", 0,
                        RK3288_CLKSEL_CON(28), 8, 5, DFLAGS),
-       COMPOSITE(0, "aclk_vio1", mux_pll_src_cpll_gpll_usb480m_p, 0,
+       COMPOSITE(0, "aclk_vio1", mux_pll_src_cpll_gpll_usb480m_p, CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(31), 14, 2, MFLAGS, 8, 5, DFLAGS,
                        RK3288_CLKGATE_CON(3), 2, GFLAGS),
 
@@ -436,24 +439,24 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 
        DIV(0, "pclk_pd_alive", "gpll", 0,
                        RK3288_CLKSEL_CON(33), 8, 5, DFLAGS),
-       COMPOSITE_NOMUX(0, "pclk_pd_pmu", "gpll", 0,
+       COMPOSITE_NOMUX(0, "pclk_pd_pmu", "gpll", CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(33), 0, 5, DFLAGS,
                        RK3288_CLKGATE_CON(5), 8, GFLAGS),
 
-       COMPOSITE(SCLK_GPU, "sclk_gpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
+       COMPOSITE(SCLK_GPU, "sclk_gpu", mux_pll_src_cpll_gll_usb_npll_p, 0,
                        RK3288_CLKSEL_CON(34), 6, 2, MFLAGS, 0, 5, DFLAGS,
                        RK3288_CLKGATE_CON(5), 7, GFLAGS),
 
-       COMPOSITE(0, "aclk_peri_src", mux_pll_src_cpll_gpll_p, 0,
+       COMPOSITE(0, "aclk_peri_src", mux_pll_src_cpll_gpll_p, CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(10), 15, 1, MFLAGS, 0, 5, DFLAGS,
                        RK3288_CLKGATE_CON(2), 0, GFLAGS),
        COMPOSITE_NOMUX(PCLK_PERI, "pclk_peri", "aclk_peri_src", 0,
                        RK3288_CLKSEL_CON(10), 12, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
                        RK3288_CLKGATE_CON(2), 3, GFLAGS),
-       COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", 0,
+       COMPOSITE_NOMUX(HCLK_PERI, "hclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
                        RK3288_CLKSEL_CON(10), 8, 2, DFLAGS | CLK_DIVIDER_POWER_OF_TWO,
                        RK3288_CLKGATE_CON(2), 2, GFLAGS),
-       GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", 0,
+       GATE(ACLK_PERI, "aclk_peri", "aclk_peri_src", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(2), 1, GFLAGS),
 
        /*
@@ -483,6 +486,18 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
                        RK3288_CLKSEL_CON(12), 14, 2, MFLAGS, 8, 6, DFLAGS,
                        RK3288_CLKGATE_CON(13), 3, GFLAGS),
 
+       MMC(SCLK_SDMMC_DRV,    "sdmmc_drv",    "sclk_sdmmc", RK3288_SDMMC_CON0, 1),
+       MMC(SCLK_SDMMC_SAMPLE, "sdmmc_sample", "sclk_sdmmc", RK3288_SDMMC_CON1, 0),
+
+       MMC(SCLK_SDIO0_DRV,    "sdio0_drv",    "sclk_sdio0", RK3288_SDIO0_CON0, 1),
+       MMC(SCLK_SDIO0_SAMPLE, "sdio0_sample", "sclk_sdio0", RK3288_SDIO0_CON1, 0),
+
+       MMC(SCLK_SDIO1_DRV,    "sdio1_drv",    "sclk_sdio1", RK3288_SDIO1_CON0, 1),
+       MMC(SCLK_SDIO1_SAMPLE, "sdio1_sample", "sclk_sdio1", RK3288_SDIO1_CON1, 0),
+
+       MMC(SCLK_EMMC_DRV,     "emmc_drv",     "sclk_emmc",  RK3288_EMMC_CON0,  1),
+       MMC(SCLK_EMMC_SAMPLE,  "emmc_sample",  "sclk_emmc",  RK3288_EMMC_CON1,  0),
+
        COMPOSITE(0, "sclk_tspout", mux_tspout_p, 0,
                        RK3288_CLKSEL_CON(35), 14, 2, MFLAGS, 8, 5, DFLAGS,
                        RK3288_CLKGATE_CON(4), 11, GFLAGS),
@@ -490,13 +505,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
                        RK3288_CLKSEL_CON(35), 6, 2, MFLAGS, 0, 5, DFLAGS,
                        RK3288_CLKGATE_CON(4), 10, GFLAGS),
 
-       GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", 0,
+       GATE(SCLK_OTGPHY0, "sclk_otgphy0", "usb480m", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(13), 4, GFLAGS),
-       GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", 0,
+       GATE(SCLK_OTGPHY1, "sclk_otgphy1", "usb480m", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(13), 5, GFLAGS),
-       GATE(SCLK_OTGPHY2, "sclk_otgphy2", "usb480m", 0,
+       GATE(SCLK_OTGPHY2, "sclk_otgphy2", "usb480m", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(13), 6, GFLAGS),
-       GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", 0,
+       GATE(SCLK_OTG_ADP, "sclk_otg_adp", "xin32k", CLK_IGNORE_UNUSED,
                        RK3288_CLKGATE_CON(13), 7, GFLAGS),
 
        COMPOSITE_NOMUX(SCLK_TSADC, "sclk_tsadc", "xin32k", 0,
@@ -517,7 +532,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
                        RK3288_CLKSEL_CON(38), 15, 1, MFLAGS, 8, 5, DFLAGS,
                        RK3288_CLKGATE_CON(5), 6, GFLAGS),
 
-       COMPOSITE(0, "uart0_src", mux_uart0_pll_p, 0,
+       COMPOSITE(0, "uart0_src", mux_pll_src_cpll_gll_usb_npll_p, 0,
                        RK3288_CLKSEL_CON(13), 13, 2, MFLAGS, 0, 7, DFLAGS,
                        RK3288_CLKGATE_CON(1), 8, GFLAGS),
        COMPOSITE_FRAC(0, "uart0_frac", "uart0_src", 0,
@@ -585,7 +600,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
 
        COMPOSITE_NODIV(0, "usbphy480m_src", mux_usbphy480m_p, 0,
                        RK3288_CLKSEL_CON(13), 11, 2, MFLAGS,
-                       RK3288_CLKGATE_CON(5), 15, GFLAGS),
+                       RK3288_CLKGATE_CON(5), 14, GFLAGS),
        COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
                        RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
                        RK3288_CLKGATE_CON(3), 6, GFLAGS),
@@ -601,19 +616,19 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
         */
 
        /* aclk_cpu gates */
-       GATE(0, "sclk_intmem0", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 5, GFLAGS),
-       GATE(0, "sclk_intmem1", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 6, GFLAGS),
-       GATE(0, "sclk_intmem2", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 7, GFLAGS),
+       GATE(0, "sclk_intmem0", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 5, GFLAGS),
+       GATE(0, "sclk_intmem1", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 6, GFLAGS),
+       GATE(0, "sclk_intmem2", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 7, GFLAGS),
        GATE(ACLK_DMAC1, "aclk_dmac1", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 12, GFLAGS),
-       GATE(0, "aclk_strc_sys", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 13, GFLAGS),
-       GATE(0, "aclk_intmem", "aclk_cpu", 0, RK3288_CLKGATE_CON(10), 4, GFLAGS),
+       GATE(0, "aclk_strc_sys", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 13, GFLAGS),
+       GATE(0, "aclk_intmem", "aclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 4, GFLAGS),
        GATE(ACLK_CRYPTO, "aclk_crypto", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 6, GFLAGS),
        GATE(0, "aclk_ccp", "aclk_cpu", 0, RK3288_CLKGATE_CON(11), 8, GFLAGS),
 
        /* hclk_cpu gates */
        GATE(HCLK_CRYPTO, "hclk_crypto", "hclk_cpu", 0, RK3288_CLKGATE_CON(11), 7, GFLAGS),
        GATE(HCLK_I2S0, "hclk_i2s0", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 8, GFLAGS),
-       GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 9, GFLAGS),
+       GATE(HCLK_ROM, "hclk_rom", "hclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(10), 9, GFLAGS),
        GATE(HCLK_SPDIF, "hclk_spdif", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 10, GFLAGS),
        GATE(HCLK_SPDIF8CH, "hclk_spdif_8ch", "hclk_cpu", 0, RK3288_CLKGATE_CON(10), 11, GFLAGS),
 
@@ -622,42 +637,42 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        GATE(PCLK_TIMER, "pclk_timer", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 1, GFLAGS),
        GATE(PCLK_I2C0, "pclk_i2c0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 2, GFLAGS),
        GATE(PCLK_I2C2, "pclk_i2c2", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 3, GFLAGS),
-       GATE(0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS),
-       GATE(0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS),
-       GATE(0, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS),
-       GATE(0, "pclk_publ1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 1, GFLAGS),
+       GATE(PCLK_DDRUPCTL0, "pclk_ddrupctl0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 14, GFLAGS),
+       GATE(PCLK_PUBL0, "pclk_publ0", "pclk_cpu", 0, RK3288_CLKGATE_CON(10), 15, GFLAGS),
+       GATE(PCLK_DDRUPCTL1, "pclk_ddrupctl1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 0, GFLAGS),
+       GATE(PCLK_PUBL1, "pclk_publ1", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 1, GFLAGS),
        GATE(0, "pclk_efuse_1024", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 2, GFLAGS),
        GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS),
        GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS),
        GATE(0, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS),
-       GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS),
+       GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 11, GFLAGS),
 
        /* ddrctrl [DDR Controller PHY clock] gates */
-       GATE(0, "nclk_ddrupctl0", "ddrphy", 0, RK3288_CLKGATE_CON(11), 4, GFLAGS),
-       GATE(0, "nclk_ddrupctl1", "ddrphy", 0, RK3288_CLKGATE_CON(11), 5, GFLAGS),
+       GATE(0, "nclk_ddrupctl0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 4, GFLAGS),
+       GATE(0, "nclk_ddrupctl1", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 5, GFLAGS),
 
        /* ddrphy gates */
-       GATE(0, "sclk_ddrphy0", "ddrphy", 0, RK3288_CLKGATE_CON(4), 12, GFLAGS),
-       GATE(0, "sclk_ddrphy1", "ddrphy", 0, RK3288_CLKGATE_CON(4), 13, GFLAGS),
+       GATE(0, "sclk_ddrphy0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(4), 12, GFLAGS),
+       GATE(0, "sclk_ddrphy1", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(4), 13, GFLAGS),
 
        /* aclk_peri gates */
-       GATE(0, "aclk_peri_axi_matrix", "aclk_peri", 0, RK3288_CLKGATE_CON(6), 2, GFLAGS),
+       GATE(0, "aclk_peri_axi_matrix", "aclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(6), 2, GFLAGS),
        GATE(ACLK_DMAC2, "aclk_dmac2", "aclk_peri", 0, RK3288_CLKGATE_CON(6), 3, GFLAGS),
-       GATE(0, "aclk_peri_niu", "aclk_peri", 0, RK3288_CLKGATE_CON(7), 11, GFLAGS),
-       GATE(ACLK_MMU, "aclk_mmu", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 12, GFLAGS),
+       GATE(0, "aclk_peri_niu", "aclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 11, GFLAGS),
+       GATE(ACLK_MMU, "aclk_mmu", "aclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(8), 12, GFLAGS),
        GATE(ACLK_GMAC, "aclk_gmac", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 0, GFLAGS),
        GATE(HCLK_GPS, "hclk_gps", "aclk_peri", 0, RK3288_CLKGATE_CON(8), 2, GFLAGS),
 
        /* hclk_peri gates */
-       GATE(0, "hclk_peri_matrix", "hclk_peri", 0, RK3288_CLKGATE_CON(6), 0, GFLAGS),
-       GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 4, GFLAGS),
+       GATE(0, "hclk_peri_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(6), 0, GFLAGS),
+       GATE(HCLK_OTG0, "hclk_otg0", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 4, GFLAGS),
        GATE(HCLK_USBHOST0, "hclk_host0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 6, GFLAGS),
-       GATE(HCLK_USBHOST1, "hclk_host1", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 7, GFLAGS),
+       GATE(HCLK_USBHOST1, "hclk_host1", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 7, GFLAGS),
        GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 8, GFLAGS),
-       GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 9, GFLAGS),
-       GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 10, GFLAGS),
-       GATE(0, "hclk_emem", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 12, GFLAGS),
-       GATE(0, "hclk_mem", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 13, GFLAGS),
+       GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 9, GFLAGS),
+       GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 10, GFLAGS),
+       GATE(0, "hclk_emem", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 12, GFLAGS),
+       GATE(0, "hclk_mem", "hclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(7), 13, GFLAGS),
        GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 14, GFLAGS),
        GATE(HCLK_NANDC1, "hclk_nandc1", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 15, GFLAGS),
        GATE(HCLK_TSP, "hclk_tsp", "hclk_peri", 0, RK3288_CLKGATE_CON(8), 8, GFLAGS),
@@ -669,7 +684,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        GATE(0, "pmu_hclk_otg0", "hclk_peri", 0, RK3288_CLKGATE_CON(7), 5, GFLAGS),
 
        /* pclk_peri gates */
-       GATE(0, "pclk_peri_matrix", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 1, GFLAGS),
+       GATE(0, "pclk_peri_matrix", "pclk_peri", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(6), 1, GFLAGS),
        GATE(PCLK_SPI0, "pclk_spi0", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 4, GFLAGS),
        GATE(PCLK_SPI1, "pclk_spi1", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 5, GFLAGS),
        GATE(PCLK_SPI2, "pclk_spi2", "pclk_peri", 0, RK3288_CLKGATE_CON(6), 6, GFLAGS),
@@ -705,48 +720,48 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
        GATE(PCLK_GPIO4, "pclk_gpio4", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 4, GFLAGS),
        GATE(PCLK_GPIO5, "pclk_gpio5", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 5, GFLAGS),
        GATE(PCLK_GPIO6, "pclk_gpio6", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 6, GFLAGS),
-       GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 11, GFLAGS),
-       GATE(0, "pclk_alive_niu", "pclk_pd_alive", 0, RK3288_CLKGATE_CON(14), 12, GFLAGS),
+       GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(14), 11, GFLAGS),
+       GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(14), 12, GFLAGS),
 
        /* pclk_pd_pmu gates */
-       GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 0, GFLAGS),
-       GATE(0, "pclk_intmem1", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 1, GFLAGS),
-       GATE(0, "pclk_pmu_niu", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 2, GFLAGS),
-       GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 3, GFLAGS),
+       GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 0, GFLAGS),
+       GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 1, GFLAGS),
+       GATE(0, "pclk_pmu_niu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 2, GFLAGS),
+       GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(17), 3, GFLAGS),
        GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3288_CLKGATE_CON(17), 4, GFLAGS),
 
        /* hclk_vio gates */
        GATE(HCLK_RGA, "hclk_rga", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 1, GFLAGS),
        GATE(HCLK_VOP0, "hclk_vop0", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 6, GFLAGS),
        GATE(HCLK_VOP1, "hclk_vop1", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 8, GFLAGS),
-       GATE(HCLK_VIO_AHB_ARBI, "hclk_vio_ahb_arbi", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 9, GFLAGS),
-       GATE(HCLK_VIO_NIU, "hclk_vio_niu", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 10, GFLAGS),
+       GATE(HCLK_VIO_AHB_ARBI, "hclk_vio_ahb_arbi", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 9, GFLAGS),
+       GATE(HCLK_VIO_NIU, "hclk_vio_niu", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 10, GFLAGS),
        GATE(HCLK_VIP, "hclk_vip", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 15, GFLAGS),
        GATE(HCLK_IEP, "hclk_iep", "hclk_vio", 0, RK3288_CLKGATE_CON(15), 3, GFLAGS),
        GATE(HCLK_ISP, "hclk_isp", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 1, GFLAGS),
-       GATE(HCLK_VIO2_H2P, "hclk_vio2_h2p", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 10, GFLAGS),
+       GATE(HCLK_VIO2_H2P, "hclk_vio2_h2p", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(16), 10, GFLAGS),
        GATE(PCLK_MIPI_DSI0, "pclk_mipi_dsi0", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 4, GFLAGS),
        GATE(PCLK_MIPI_DSI1, "pclk_mipi_dsi1", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 5, GFLAGS),
        GATE(PCLK_MIPI_CSI, "pclk_mipi_csi", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 6, GFLAGS),
        GATE(PCLK_LVDS_PHY, "pclk_lvds_phy", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 7, GFLAGS),
-       GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 8, GFLAGS),
+       GATE(PCLK_EDP_CTRL, "pclk_edp_ctrl", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(16), 8, GFLAGS),
        GATE(PCLK_HDMI_CTRL, "pclk_hdmi_ctrl", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 9, GFLAGS),
-       GATE(PCLK_VIO2_H2P, "pclk_vio2_h2p", "hclk_vio", 0, RK3288_CLKGATE_CON(16), 11, GFLAGS),
+       GATE(PCLK_VIO2_H2P, "pclk_vio2_h2p", "hclk_vio", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(16), 11, GFLAGS),
 
        /* aclk_vio0 gates */
        GATE(ACLK_VOP0, "aclk_vop0", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 5, GFLAGS),
        GATE(ACLK_IEP, "aclk_iep", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 2, GFLAGS),
-       GATE(ACLK_VIO0_NIU, "aclk_vio0_niu", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 11, GFLAGS),
+       GATE(ACLK_VIO0_NIU, "aclk_vio0_niu", "aclk_vio0", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 11, GFLAGS),
        GATE(ACLK_VIP, "aclk_vip", "aclk_vio0", 0, RK3288_CLKGATE_CON(15), 14, GFLAGS),
 
        /* aclk_vio1 gates */
        GATE(ACLK_VOP1, "aclk_vop1", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 7, GFLAGS),
        GATE(ACLK_ISP, "aclk_isp", "aclk_vio1", 0, RK3288_CLKGATE_CON(16), 2, GFLAGS),
-       GATE(ACLK_VIO1_NIU, "aclk_vio1_niu", "aclk_vio1", 0, RK3288_CLKGATE_CON(15), 12, GFLAGS),
+       GATE(ACLK_VIO1_NIU, "aclk_vio1_niu", "aclk_vio1", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 12, GFLAGS),
 
        /* aclk_rga_pre gates */
        GATE(ACLK_RGA, "aclk_rga", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 0, GFLAGS),
-       GATE(ACLK_RGA_NIU, "aclk_rga_niu", "aclk_rga_pre", 0, RK3288_CLKGATE_CON(15), 13, GFLAGS),
+       GATE(ACLK_RGA_NIU, "aclk_rga_niu", "aclk_rga_pre", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(15), 13, GFLAGS),
 
        /*
         * Other ungrouped clocks.
@@ -762,6 +777,64 @@ static const char *rk3288_critical_clocks[] __initconst = {
        "hclk_peri",
 };
 
+#ifdef CONFIG_PM_SLEEP
+static void __iomem *rk3288_cru_base;
+
+/* Some CRU registers will be reset in maskrom when the system
+ * wakes up from fastboot.
+ * So save them before suspend, restore them after resume.
+ */
+static const int rk3288_saved_cru_reg_ids[] = {
+       RK3288_MODE_CON,
+       RK3288_CLKSEL_CON(0),
+       RK3288_CLKSEL_CON(1),
+       RK3288_CLKSEL_CON(10),
+       RK3288_CLKSEL_CON(33),
+       RK3288_CLKSEL_CON(37),
+};
+
+static u32 rk3288_saved_cru_regs[ARRAY_SIZE(rk3288_saved_cru_reg_ids)];
+
+static int rk3288_clk_suspend(void)
+{
+       int i, reg_id;
+
+       for (i = 0; i < ARRAY_SIZE(rk3288_saved_cru_reg_ids); i++) {
+               reg_id = rk3288_saved_cru_reg_ids[i];
+
+               rk3288_saved_cru_regs[i] =
+                               readl_relaxed(rk3288_cru_base + reg_id);
+       }
+       return 0;
+}
+
+static void rk3288_clk_resume(void)
+{
+       int i, reg_id;
+
+       for (i = ARRAY_SIZE(rk3288_saved_cru_reg_ids) - 1; i >= 0; i--) {
+               reg_id = rk3288_saved_cru_reg_ids[i];
+
+               writel_relaxed(rk3288_saved_cru_regs[i] | 0xffff0000,
+                              rk3288_cru_base + reg_id);
+       }
+}
+
+static struct syscore_ops rk3288_clk_syscore_ops = {
+       .suspend = rk3288_clk_suspend,
+       .resume = rk3288_clk_resume,
+};
+
+static void rk3288_clk_sleep_init(void __iomem *reg_base)
+{
+       rk3288_cru_base = reg_base;
+       register_syscore_ops(&rk3288_clk_syscore_ops);
+}
+
+#else /* CONFIG_PM_SLEEP */
+static void rk3288_clk_sleep_init(void __iomem *reg_base) {}
+#endif
+
 static void __init rk3288_clk_init(struct device_node *np)
 {
        void __iomem *reg_base;
@@ -810,5 +883,6 @@ static void __init rk3288_clk_init(struct device_node *np)
                                  ROCKCHIP_SOFTRST_HIWORD_MASK);
 
        rockchip_register_restart_notifier(RK3288_GLB_SRST_FST);
+       rk3288_clk_sleep_init(reg_base);
 }
 CLK_OF_DECLARE(rk3288_cru, "rockchip,rk3288-cru", rk3288_clk_init);
index 880a266f01431b3b9e7040565d3a3e81f0716a8b..20e05bbb3a6766bd8c630c9ce9cd16d16d81c6f8 100644 (file)
@@ -197,7 +197,8 @@ void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
                                list->parent_names, list->num_parents,
                                reg_base, list->con_offset, grf_lock_offset,
                                list->lock_shift, list->mode_offset,
-                               list->mode_shift, list->rate_table, &clk_lock);
+                               list->mode_shift, list->rate_table,
+                               list->pll_flags, &clk_lock);
                if (IS_ERR(clk)) {
                        pr_err("%s: failed to register clock %s\n", __func__,
                                list->name);
@@ -244,9 +245,6 @@ void __init rockchip_clk_register_branches(
                                        list->div_flags, &clk_lock);
                        break;
                case branch_fraction_divider:
-                       /* keep all gates untouched for now */
-                       flags |= CLK_IGNORE_UNUSED;
-
                        clk = rockchip_clk_register_frac_branch(list->name,
                                list->parent_names, list->num_parents,
                                reg_base, list->muxdiv_offset, list->div_flags,
@@ -256,18 +254,12 @@ void __init rockchip_clk_register_branches(
                case branch_gate:
                        flags |= CLK_SET_RATE_PARENT;
 
-                       /* keep all gates untouched for now */
-                       flags |= CLK_IGNORE_UNUSED;
-
                        clk = clk_register_gate(NULL, list->name,
                                list->parent_names[0], flags,
                                reg_base + list->gate_offset,
                                list->gate_shift, list->gate_flags, &clk_lock);
                        break;
                case branch_composite:
-                       /* keep all gates untouched for now */
-                       flags |= CLK_IGNORE_UNUSED;
-
                        clk = rockchip_clk_register_branch(list->name,
                                list->parent_names, list->num_parents,
                                reg_base, list->muxdiv_offset, list->mux_shift,
@@ -277,6 +269,14 @@ void __init rockchip_clk_register_branches(
                                list->gate_offset, list->gate_shift,
                                list->gate_flags, flags, &clk_lock);
                        break;
+               case branch_mmc:
+                       clk = rockchip_clk_register_mmc(
+                               list->name,
+                               list->parent_names, list->num_parents,
+                               reg_base + list->muxdiv_offset,
+                               list->div_shift
+                       );
+                       break;
                }
 
                /* none of the cases above matched */
index ca009ab0a33a28ad507d83cf9cf0de183b33a09e..58d2e3bdf22fed499f8b4893eaf48690433d0298 100644 (file)
 #define RK3288_GLB_SRST_SND            0x1b4
 #define RK3288_SOFTRST_CON(x)          (x * 0x4 + 0x1b8)
 #define RK3288_MISC_CON                        0x1e8
+#define RK3288_SDMMC_CON0              0x200
+#define RK3288_SDMMC_CON1              0x204
+#define RK3288_SDIO0_CON0              0x208
+#define RK3288_SDIO0_CON1              0x20c
+#define RK3288_SDIO1_CON0              0x210
+#define RK3288_SDIO1_CON1              0x214
+#define RK3288_EMMC_CON0               0x218
+#define RK3288_EMMC_CON1               0x21c
 
 enum rockchip_pll_type {
        pll_rk3066,
@@ -62,6 +70,15 @@ enum rockchip_pll_type {
        .bwadj = (_nf >> 1),                    \
 }
 
+#define RK3066_PLL_RATE_BWADJ(_rate, _nr, _nf, _no, _bw)       \
+{                                                              \
+       .rate   = _rate##U,                                     \
+       .nr = _nr,                                              \
+       .nf = _nf,                                              \
+       .no = _no,                                              \
+       .bwadj = _bw,                                           \
+}
+
 struct rockchip_pll_rate_table {
        unsigned long rate;
        unsigned int nr;
@@ -81,7 +98,12 @@ struct rockchip_pll_rate_table {
  * @mode_shift: offset inside the mode-register for the mode of this pll.
  * @lock_shift: offset inside the lock register for the lock status.
  * @type: Type of PLL to be registered.
+ * @pll_flags: hardware-specific flags
  * @rate_table: Table of usable pll rates
+ *
+ * Flags:
+ * ROCKCHIP_PLL_SYNC_RATE - check rate parameters to match against the
+ *     rate_table parameters and adjust them if necessary.
  */
 struct rockchip_pll_clock {
        unsigned int            id;
@@ -94,11 +116,14 @@ struct rockchip_pll_clock {
        int                     mode_shift;
        int                     lock_shift;
        enum rockchip_pll_type  type;
+       u8                      pll_flags;
        struct rockchip_pll_rate_table *rate_table;
 };
 
+#define ROCKCHIP_PLL_SYNC_RATE         BIT(0)
+
 #define PLL(_type, _id, _name, _pnames, _flags, _con, _mode, _mshift,  \
-               _lshift, _rtable)                                       \
+               _lshift, _pflags, _rtable)                              \
        {                                                               \
                .id             = _id,                                  \
                .type           = _type,                                \
@@ -110,6 +135,7 @@ struct rockchip_pll_clock {
                .mode_offset    = _mode,                                \
                .mode_shift     = _mshift,                              \
                .lock_shift     = _lshift,                              \
+               .pll_flags      = _pflags,                              \
                .rate_table     = _rtable,                              \
        }
 
@@ -118,7 +144,7 @@ struct clk *rockchip_clk_register_pll(enum rockchip_pll_type pll_type,
                void __iomem *base, int con_offset, int grf_lock_offset,
                int lock_shift, int reg_mode, int mode_shift,
                struct rockchip_pll_rate_table *rate_table,
-               spinlock_t *lock);
+               u8 clk_pll_flags, spinlock_t *lock);
 
 struct rockchip_cpuclk_clksel {
        int reg;
@@ -152,6 +178,10 @@ struct clk *rockchip_clk_register_cpuclk(const char *name,
                        const struct rockchip_cpuclk_rate_table *rates,
                        int nrates, void __iomem *reg_base, spinlock_t *lock);
 
+struct clk *rockchip_clk_register_mmc(const char *name,
+                               const char **parent_names, u8 num_parents,
+                               void __iomem *reg, int shift);
+
 #define PNAME(x) static const char *x[] __initconst
 
 enum rockchip_clk_branch_type {
@@ -160,6 +190,7 @@ enum rockchip_clk_branch_type {
        branch_divider,
        branch_fraction_divider,
        branch_gate,
+       branch_mmc,
 };
 
 struct rockchip_clk_branch {
@@ -352,6 +383,16 @@ struct rockchip_clk_branch {
                .gate_flags     = gf,                           \
        }
 
+#define MMC(_id, cname, pname, offset, shift)                  \
+       {                                                       \
+               .id             = _id,                          \
+               .branch_type    = branch_mmc,                   \
+               .name           = cname,                        \
+               .parent_names   = (const char *[]){ pname },    \
+               .num_parents    = 1,                            \
+               .muxdiv_offset  = offset,                       \
+               .div_shift      = shift,                        \
+       }
 
 void rockchip_clk_init(struct device_node *np, void __iomem *base,
                       unsigned long nr_clks);
index 6fb4bc602e8ac467d4489e51f30b642e5771dda7..006c6f294310d51e804bfda91b76e3eeb211b7e7 100644 (file)
@@ -5,6 +5,7 @@
 obj-$(CONFIG_COMMON_CLK)       += clk.o clk-pll.o
 obj-$(CONFIG_SOC_EXYNOS3250)   += clk-exynos3250.o
 obj-$(CONFIG_ARCH_EXYNOS4)     += clk-exynos4.o
+obj-$(CONFIG_SOC_EXYNOS4415)   += clk-exynos4415.o
 obj-$(CONFIG_SOC_EXYNOS5250)   += clk-exynos5250.o
 obj-$(CONFIG_SOC_EXYNOS5260)   += clk-exynos5260.o
 obj-$(CONFIG_SOC_EXYNOS5410)   += clk-exynos5410.o
@@ -12,6 +13,7 @@ obj-$(CONFIG_SOC_EXYNOS5420)  += clk-exynos5420.o
 obj-$(CONFIG_SOC_EXYNOS5440)   += clk-exynos5440.o
 obj-$(CONFIG_ARCH_EXYNOS)      += clk-exynos-audss.o
 obj-$(CONFIG_ARCH_EXYNOS)      += clk-exynos-clkout.o
+obj-$(CONFIG_ARCH_EXYNOS7)     += clk-exynos7.o
 obj-$(CONFIG_S3C2410_COMMON_CLK)+= clk-s3c2410.o
 obj-$(CONFIG_S3C2410_COMMON_DCLK)+= clk-s3c2410-dclk.o
 obj-$(CONFIG_S3C2412_COMMON_CLK)+= clk-s3c2412.o
index acce708ace18e02997a004aebd6b2b0fee17ef20..f2c2ccce49bb1ad00f7502e9d4e4236fc6bcbe54 100644 (file)
@@ -29,6 +29,13 @@ static DEFINE_SPINLOCK(lock);
 static struct clk **clk_table;
 static void __iomem *reg_base;
 static struct clk_onecell_data clk_data;
+/*
+ * On Exynos5420 this will be a clock which has to be enabled before any
+ * access to audss registers. Typically a child of EPLL.
+ *
+ * On other platforms this will be -ENODEV.
+ */
+static struct clk *epll;
 
 #define ASS_CLK_SRC 0x0
 #define ASS_CLK_DIV 0x4
@@ -98,6 +105,8 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
                dev_err(&pdev->dev, "failed to map audss registers\n");
                return PTR_ERR(reg_base);
        }
+       /* EPLL doesn't have to be enabled for boards other than Exynos5420 */
+       epll = ERR_PTR(-ENODEV);
 
        clk_table = devm_kzalloc(&pdev->dev,
                                sizeof(struct clk *) * EXYNOS_AUDSS_MAX_CLKS,
@@ -115,8 +124,20 @@ static int exynos_audss_clk_probe(struct platform_device *pdev)
        pll_in = devm_clk_get(&pdev->dev, "pll_in");
        if (!IS_ERR(pll_ref))
                mout_audss_p[0] = __clk_get_name(pll_ref);
-       if (!IS_ERR(pll_in))
+       if (!IS_ERR(pll_in)) {
                mout_audss_p[1] = __clk_get_name(pll_in);
+
+               if (variant == TYPE_EXYNOS5420) {
+                       epll = pll_in;
+
+                       ret = clk_prepare_enable(epll);
+                       if (ret) {
+                               dev_err(&pdev->dev,
+                                               "failed to prepare the epll clock\n");
+                               return ret;
+                       }
+               }
+       }
        clk_table[EXYNOS_MOUT_AUDSS] = clk_register_mux(NULL, "mout_audss",
                                mout_audss_p, ARRAY_SIZE(mout_audss_p),
                                CLK_SET_RATE_NO_REPARENT,
@@ -203,6 +224,9 @@ unregister:
                        clk_unregister(clk_table[i]);
        }
 
+       if (!IS_ERR(epll))
+               clk_disable_unprepare(epll);
+
        return ret;
 }
 
@@ -210,6 +234,10 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
 {
        int i;
 
+#ifdef CONFIG_PM_SLEEP
+       unregister_syscore_ops(&exynos_audss_clk_syscore_ops);
+#endif
+
        of_clk_del_provider(pdev->dev.of_node);
 
        for (i = 0; i < clk_data.clk_num; i++) {
@@ -217,6 +245,9 @@ static int exynos_audss_clk_remove(struct platform_device *pdev)
                        clk_unregister(clk_table[i]);
        }
 
+       if (!IS_ERR(epll))
+               clk_disable_unprepare(epll);
+
        return 0;
 }
 
index 940f02837b824869932d3e6b63462942577a708c..88e8c6bbd77ff8ea6919bc6c1ef7bd3fdc1324ae 100644 (file)
@@ -505,7 +505,7 @@ static struct samsung_fixed_rate_clock exynos4_fixed_rate_ext_clks[] __initdata
 /* fixed rate clocks generated inside the soc */
 static struct samsung_fixed_rate_clock exynos4_fixed_rate_clks[] __initdata = {
        FRATE(0, "sclk_hdmi24m", NULL, CLK_IS_ROOT, 24000000),
-       FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
+       FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", "hdmi", 0, 27000000),
        FRATE(0, "sclk_usbphy0", NULL, CLK_IS_ROOT, 48000000),
 };
 
diff --git a/drivers/clk/samsung/clk-exynos4415.c b/drivers/clk/samsung/clk-exynos4415.c
new file mode 100644 (file)
index 0000000..2123fc2
--- /dev/null
@@ -0,0 +1,1144 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Common Clock Framework support for Exynos4415 SoC.
+ */
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/syscore_ops.h>
+
+#include <dt-bindings/clock/exynos4415.h>
+
+#include "clk.h"
+#include "clk-pll.h"
+
+#define SRC_LEFTBUS            0x4200
+#define DIV_LEFTBUS            0x4500
+#define GATE_IP_LEFTBUS                0x4800
+#define GATE_IP_IMAGE          0x4930
+#define SRC_RIGHTBUS           0x8200
+#define DIV_RIGHTBUS           0x8500
+#define GATE_IP_RIGHTBUS       0x8800
+#define GATE_IP_PERIR          0x8960
+#define EPLL_LOCK              0xc010
+#define G3D_PLL_LOCK           0xc020
+#define DISP_PLL_LOCK          0xc030
+#define ISP_PLL_LOCK           0xc040
+#define EPLL_CON0              0xc110
+#define EPLL_CON1              0xc114
+#define EPLL_CON2              0xc118
+#define G3D_PLL_CON0           0xc120
+#define G3D_PLL_CON1           0xc124
+#define G3D_PLL_CON2           0xc128
+#define ISP_PLL_CON0           0xc130
+#define ISP_PLL_CON1           0xc134
+#define ISP_PLL_CON2           0xc138
+#define DISP_PLL_CON0          0xc140
+#define DISP_PLL_CON1          0xc144
+#define DISP_PLL_CON2          0xc148
+#define SRC_TOP0               0xc210
+#define SRC_TOP1               0xc214
+#define SRC_CAM                        0xc220
+#define SRC_TV                 0xc224
+#define SRC_MFC                        0xc228
+#define SRC_G3D                        0xc22c
+#define SRC_LCD                        0xc234
+#define SRC_ISP                        0xc238
+#define SRC_MAUDIO             0xc23c
+#define SRC_FSYS               0xc240
+#define SRC_PERIL0             0xc250
+#define SRC_PERIL1             0xc254
+#define SRC_CAM1               0xc258
+#define SRC_TOP_ISP0           0xc25c
+#define SRC_TOP_ISP1           0xc260
+#define SRC_MASK_TOP           0xc310
+#define SRC_MASK_CAM           0xc320
+#define SRC_MASK_TV            0xc324
+#define SRC_MASK_LCD           0xc334
+#define SRC_MASK_ISP           0xc338
+#define SRC_MASK_MAUDIO                0xc33c
+#define SRC_MASK_FSYS          0xc340
+#define SRC_MASK_PERIL0                0xc350
+#define SRC_MASK_PERIL1                0xc354
+#define DIV_TOP                        0xc510
+#define DIV_CAM                        0xc520
+#define DIV_TV                 0xc524
+#define DIV_MFC                        0xc528
+#define DIV_G3D                        0xc52c
+#define DIV_LCD                        0xc534
+#define DIV_ISP                        0xc538
+#define DIV_MAUDIO             0xc53c
+#define DIV_FSYS0              0xc540
+#define DIV_FSYS1              0xc544
+#define DIV_FSYS2              0xc548
+#define DIV_PERIL0             0xc550
+#define DIV_PERIL1             0xc554
+#define DIV_PERIL2             0xc558
+#define DIV_PERIL3             0xc55c
+#define DIV_PERIL4             0xc560
+#define DIV_PERIL5             0xc564
+#define DIV_CAM1               0xc568
+#define DIV_TOP_ISP1           0xc56c
+#define DIV_TOP_ISP0           0xc570
+#define CLKDIV2_RATIO          0xc580
+#define GATE_SCLK_CAM          0xc820
+#define GATE_SCLK_TV           0xc824
+#define GATE_SCLK_MFC          0xc828
+#define GATE_SCLK_G3D          0xc82c
+#define GATE_SCLK_LCD          0xc834
+#define GATE_SCLK_MAUDIO       0xc83c
+#define GATE_SCLK_FSYS         0xc840
+#define GATE_SCLK_PERIL                0xc850
+#define GATE_IP_CAM            0xc920
+#define GATE_IP_TV             0xc924
+#define GATE_IP_MFC            0xc928
+#define GATE_IP_G3D            0xc92c
+#define GATE_IP_LCD            0xc934
+#define GATE_IP_FSYS           0xc940
+#define GATE_IP_PERIL          0xc950
+#define GATE_BLOCK             0xc970
+#define APLL_LOCK              0x14000
+#define APLL_CON0              0x14100
+#define SRC_CPU                        0x14200
+#define DIV_CPU0               0x14500
+#define DIV_CPU1               0x14504
+
+enum exynos4415_plls {
+       apll, epll, g3d_pll, isp_pll, disp_pll,
+       nr_plls,
+};
+
+static struct samsung_clk_provider *exynos4415_ctx;
+
+/*
+ * Support for CMU save/restore across system suspends
+ */
+#ifdef CONFIG_PM_SLEEP
+static struct samsung_clk_reg_dump *exynos4415_clk_regs;
+
+static unsigned long exynos4415_cmu_clk_regs[] __initdata = {
+       SRC_LEFTBUS,
+       DIV_LEFTBUS,
+       GATE_IP_LEFTBUS,
+       GATE_IP_IMAGE,
+       SRC_RIGHTBUS,
+       DIV_RIGHTBUS,
+       GATE_IP_RIGHTBUS,
+       GATE_IP_PERIR,
+       EPLL_LOCK,
+       G3D_PLL_LOCK,
+       DISP_PLL_LOCK,
+       ISP_PLL_LOCK,
+       EPLL_CON0,
+       EPLL_CON1,
+       EPLL_CON2,
+       G3D_PLL_CON0,
+       G3D_PLL_CON1,
+       G3D_PLL_CON2,
+       ISP_PLL_CON0,
+       ISP_PLL_CON1,
+       ISP_PLL_CON2,
+       DISP_PLL_CON0,
+       DISP_PLL_CON1,
+       DISP_PLL_CON2,
+       SRC_TOP0,
+       SRC_TOP1,
+       SRC_CAM,
+       SRC_TV,
+       SRC_MFC,
+       SRC_G3D,
+       SRC_LCD,
+       SRC_ISP,
+       SRC_MAUDIO,
+       SRC_FSYS,
+       SRC_PERIL0,
+       SRC_PERIL1,
+       SRC_CAM1,
+       SRC_TOP_ISP0,
+       SRC_TOP_ISP1,
+       SRC_MASK_TOP,
+       SRC_MASK_CAM,
+       SRC_MASK_TV,
+       SRC_MASK_LCD,
+       SRC_MASK_ISP,
+       SRC_MASK_MAUDIO,
+       SRC_MASK_FSYS,
+       SRC_MASK_PERIL0,
+       SRC_MASK_PERIL1,
+       DIV_TOP,
+       DIV_CAM,
+       DIV_TV,
+       DIV_MFC,
+       DIV_G3D,
+       DIV_LCD,
+       DIV_ISP,
+       DIV_MAUDIO,
+       DIV_FSYS0,
+       DIV_FSYS1,
+       DIV_FSYS2,
+       DIV_PERIL0,
+       DIV_PERIL1,
+       DIV_PERIL2,
+       DIV_PERIL3,
+       DIV_PERIL4,
+       DIV_PERIL5,
+       DIV_CAM1,
+       DIV_TOP_ISP1,
+       DIV_TOP_ISP0,
+       CLKDIV2_RATIO,
+       GATE_SCLK_CAM,
+       GATE_SCLK_TV,
+       GATE_SCLK_MFC,
+       GATE_SCLK_G3D,
+       GATE_SCLK_LCD,
+       GATE_SCLK_MAUDIO,
+       GATE_SCLK_FSYS,
+       GATE_SCLK_PERIL,
+       GATE_IP_CAM,
+       GATE_IP_TV,
+       GATE_IP_MFC,
+       GATE_IP_G3D,
+       GATE_IP_LCD,
+       GATE_IP_FSYS,
+       GATE_IP_PERIL,
+       GATE_BLOCK,
+       APLL_LOCK,
+       APLL_CON0,
+       SRC_CPU,
+       DIV_CPU0,
+       DIV_CPU1,
+};
+
+static int exynos4415_clk_suspend(void)
+{
+       samsung_clk_save(exynos4415_ctx->reg_base, exynos4415_clk_regs,
+                               ARRAY_SIZE(exynos4415_cmu_clk_regs));
+
+       return 0;
+}
+
+static void exynos4415_clk_resume(void)
+{
+       samsung_clk_restore(exynos4415_ctx->reg_base, exynos4415_clk_regs,
+                               ARRAY_SIZE(exynos4415_cmu_clk_regs));
+}
+
+static struct syscore_ops exynos4415_clk_syscore_ops = {
+       .suspend = exynos4415_clk_suspend,
+       .resume = exynos4415_clk_resume,
+};
+
+static void exynos4415_clk_sleep_init(void)
+{
+       exynos4415_clk_regs =
+               samsung_clk_alloc_reg_dump(exynos4415_cmu_clk_regs,
+                                       ARRAY_SIZE(exynos4415_cmu_clk_regs));
+       if (!exynos4415_clk_regs) {
+               pr_warn("%s: Failed to allocate sleep save data\n", __func__);
+               return;
+       }
+
+       register_syscore_ops(&exynos4415_clk_syscore_ops);
+}
+#else
+static inline void exynos4415_clk_sleep_init(void) { }
+#endif
+
+/* list of all parent clock list */
+PNAME(mout_g3d_pllsrc_p)       = { "fin_pll", };
+
+PNAME(mout_apll_p)             = { "fin_pll", "fout_apll", };
+PNAME(mout_g3d_pll_p)          = { "fin_pll", "fout_g3d_pll", };
+PNAME(mout_isp_pll_p)          = { "fin_pll", "fout_isp_pll", };
+PNAME(mout_disp_pll_p)         = { "fin_pll", "fout_disp_pll", };
+
+PNAME(mout_mpll_user_p)                = { "fin_pll", "div_mpll_pre", };
+PNAME(mout_epll_p)             = { "fin_pll", "fout_epll", };
+PNAME(mout_core_p)             = { "mout_apll", "mout_mpll_user_c", };
+PNAME(mout_hpm_p)              = { "mout_apll", "mout_mpll_user_c", };
+
+PNAME(mout_ebi_p)              = { "div_aclk_200", "div_aclk_160", };
+PNAME(mout_ebi_1_p)            = { "mout_ebi", "mout_g3d_pll", };
+
+PNAME(mout_gdl_p)              = { "mout_mpll_user_l", };
+PNAME(mout_gdr_p)              = { "mout_mpll_user_r", };
+
+PNAME(mout_aclk_266_p)         = { "mout_mpll_user_t", "mout_g3d_pll", };
+
+PNAME(group_epll_g3dpll_p)     = { "mout_epll", "mout_g3d_pll" };
+PNAME(group_sclk_p)            = { "xxti", "xusbxti",
+                                   "none", "mout_isp_pll",
+                                   "none", "none", "div_mpll_pre",
+                                   "mout_epll", "mout_g3d_pll", };
+PNAME(group_spdif_p)           = { "mout_audio0", "mout_audio1",
+                                   "mout_audio2", "spdif_extclk", };
+PNAME(group_sclk_audio2_p)     = { "audiocdclk2", "none",
+                                   "none", "mout_isp_pll",
+                                   "mout_disp_pll", "xusbxti",
+                                   "div_mpll_pre", "mout_epll",
+                                   "mout_g3d_pll", };
+PNAME(group_sclk_audio1_p)     = { "audiocdclk1", "none",
+                                   "none", "mout_isp_pll",
+                                   "mout_disp_pll", "xusbxti",
+                                   "div_mpll_pre", "mout_epll",
+                                   "mout_g3d_pll", };
+PNAME(group_sclk_audio0_p)     = { "audiocdclk0", "none",
+                                   "none", "mout_isp_pll",
+                                   "mout_disp_pll", "xusbxti",
+                                   "div_mpll_pre", "mout_epll",
+                                   "mout_g3d_pll", };
+PNAME(group_fimc_lclk_p)       = { "xxti", "xusbxti",
+                                   "none", "mout_isp_pll",
+                                   "none", "mout_disp_pll",
+                                   "mout_mpll_user_t", "mout_epll",
+                                   "mout_g3d_pll", };
+PNAME(group_sclk_fimd0_p)      = { "xxti", "xusbxti",
+                                   "m_bitclkhsdiv4_4l", "mout_isp_pll",
+                                   "mout_disp_pll", "sclk_hdmiphy",
+                                   "div_mpll_pre", "mout_epll",
+                                   "mout_g3d_pll", };
+PNAME(mout_hdmi_p)             = { "sclk_pixel", "sclk_hdmiphy" };
+PNAME(mout_mfc_p)              = { "mout_mfc_0", "mout_mfc_1" };
+PNAME(mout_g3d_p)              = { "mout_g3d_0", "mout_g3d_1" };
+PNAME(mout_jpeg_p)             = { "mout_jpeg_0", "mout_jpeg_1" };
+PNAME(mout_jpeg1_p)            = { "mout_epll", "mout_g3d_pll" };
+PNAME(group_aclk_isp0_300_p)   = { "mout_isp_pll", "div_mpll_pre" };
+PNAME(group_aclk_isp0_400_user_p) = { "fin_pll", "div_aclk_400_mcuisp" };
+PNAME(group_aclk_isp0_300_user_p) = { "fin_pll", "mout_aclk_isp0_300" };
+PNAME(group_aclk_isp1_300_user_p) = { "fin_pll", "mout_aclk_isp1_300" };
+PNAME(group_mout_mpll_user_t_p)        = { "mout_mpll_user_t" };
+
+static struct samsung_fixed_factor_clock exynos4415_fixed_factor_clks[] __initdata = {
+       /* HACK: fin_pll hardcoded to xusbxti until detection is implemented. */
+       FFACTOR(CLK_FIN_PLL, "fin_pll", "xusbxti", 1, 1, 0),
+};
+
+static struct samsung_fixed_rate_clock exynos4415_fixed_rate_clks[] __initdata = {
+       FRATE(CLK_SCLK_HDMIPHY, "sclk_hdmiphy", NULL, CLK_IS_ROOT, 27000000),
+};
+
+static struct samsung_mux_clock exynos4415_mux_clks[] __initdata = {
+       /*
+        * NOTE: Following table is sorted by register address in ascending
+        * order and then bitfield shift in descending order, as it is done
+        * in the User's Manual. When adding new entries, please make sure
+        * that the order is preserved, to avoid merge conflicts and make
+        * further work with defined data easier.
+        */
+
+       /* SRC_LEFTBUS */
+       MUX(CLK_MOUT_MPLL_USER_L, "mout_mpll_user_l", mout_mpll_user_p,
+               SRC_LEFTBUS, 4, 1),
+       MUX(CLK_MOUT_GDL, "mout_gdl", mout_gdl_p, SRC_LEFTBUS, 0, 1),
+
+       /* SRC_RIGHTBUS */
+       MUX(CLK_MOUT_MPLL_USER_R, "mout_mpll_user_r", mout_mpll_user_p,
+               SRC_RIGHTBUS, 4, 1),
+       MUX(CLK_MOUT_GDR, "mout_gdr", mout_gdr_p, SRC_RIGHTBUS, 0, 1),
+
+       /* SRC_TOP0 */
+       MUX(CLK_MOUT_EBI, "mout_ebi", mout_ebi_p, SRC_TOP0, 28, 1),
+       MUX(CLK_MOUT_ACLK_200, "mout_aclk_200", group_mout_mpll_user_t_p,
+               SRC_TOP0, 24, 1),
+       MUX(CLK_MOUT_ACLK_160, "mout_aclk_160", group_mout_mpll_user_t_p,
+               SRC_TOP0, 20, 1),
+       MUX(CLK_MOUT_ACLK_100, "mout_aclk_100", group_mout_mpll_user_t_p,
+               SRC_TOP0, 16, 1),
+       MUX(CLK_MOUT_ACLK_266, "mout_aclk_266", mout_aclk_266_p,
+               SRC_TOP0, 12, 1),
+       MUX(CLK_MOUT_G3D_PLL, "mout_g3d_pll", mout_g3d_pll_p,
+               SRC_TOP0, 8, 1),
+       MUX(CLK_MOUT_EPLL, "mout_epll", mout_epll_p, SRC_TOP0, 4, 1),
+       MUX(CLK_MOUT_EBI_1, "mout_ebi_1", mout_ebi_1_p, SRC_TOP0, 0, 1),
+
+       /* SRC_TOP1 */
+       MUX(CLK_MOUT_ISP_PLL, "mout_isp_pll", mout_isp_pll_p,
+               SRC_TOP1, 28, 1),
+       MUX(CLK_MOUT_DISP_PLL, "mout_disp_pll", mout_disp_pll_p,
+               SRC_TOP1, 16, 1),
+       MUX(CLK_MOUT_MPLL_USER_T, "mout_mpll_user_t", mout_mpll_user_p,
+               SRC_TOP1, 12, 1),
+       MUX(CLK_MOUT_ACLK_400_MCUISP, "mout_aclk_400_mcuisp",
+               group_mout_mpll_user_t_p, SRC_TOP1, 8, 1),
+       MUX(CLK_MOUT_G3D_PLLSRC, "mout_g3d_pllsrc", mout_g3d_pllsrc_p,
+               SRC_TOP1, 0, 1),
+
+       /* SRC_CAM */
+       MUX(CLK_MOUT_CSIS1, "mout_csis1", group_fimc_lclk_p, SRC_CAM, 28, 4),
+       MUX(CLK_MOUT_CSIS0, "mout_csis0", group_fimc_lclk_p, SRC_CAM, 24, 4),
+       MUX(CLK_MOUT_CAM1, "mout_cam1", group_fimc_lclk_p, SRC_CAM, 20, 4),
+       MUX(CLK_MOUT_FIMC3_LCLK, "mout_fimc3_lclk", group_fimc_lclk_p, SRC_CAM,
+               12, 4),
+       MUX(CLK_MOUT_FIMC2_LCLK, "mout_fimc2_lclk", group_fimc_lclk_p, SRC_CAM,
+               8, 4),
+       MUX(CLK_MOUT_FIMC1_LCLK, "mout_fimc1_lclk", group_fimc_lclk_p, SRC_CAM,
+               4, 4),
+       MUX(CLK_MOUT_FIMC0_LCLK, "mout_fimc0_lclk", group_fimc_lclk_p, SRC_CAM,
+               0, 4),
+
+       /* SRC_TV */
+       MUX(CLK_MOUT_HDMI, "mout_hdmi", mout_hdmi_p, SRC_TV, 0, 1),
+
+       /* SRC_MFC */
+       MUX(CLK_MOUT_MFC, "mout_mfc", mout_mfc_p, SRC_MFC, 8, 1),
+       MUX(CLK_MOUT_MFC_1, "mout_mfc_1", group_epll_g3dpll_p, SRC_MFC, 4, 1),
+       MUX(CLK_MOUT_MFC_0, "mout_mfc_0", group_mout_mpll_user_t_p, SRC_MFC, 0,
+               1),
+
+       /* SRC_G3D */
+       MUX(CLK_MOUT_G3D, "mout_g3d", mout_g3d_p, SRC_G3D, 8, 1),
+       MUX(CLK_MOUT_G3D_1, "mout_g3d_1", group_epll_g3dpll_p, SRC_G3D, 4, 1),
+       MUX(CLK_MOUT_G3D_0, "mout_g3d_0", group_mout_mpll_user_t_p, SRC_G3D, 0,
+               1),
+
+       /* SRC_LCD */
+       MUX(CLK_MOUT_MIPI0, "mout_mipi0", group_fimc_lclk_p, SRC_LCD, 12, 4),
+       MUX(CLK_MOUT_FIMD0, "mout_fimd0", group_sclk_fimd0_p, SRC_LCD, 0, 4),
+
+       /* SRC_ISP */
+       MUX(CLK_MOUT_TSADC_ISP, "mout_tsadc_isp", group_fimc_lclk_p, SRC_ISP,
+               16, 4),
+       MUX(CLK_MOUT_UART_ISP, "mout_uart_isp", group_fimc_lclk_p, SRC_ISP,
+               12, 4),
+       MUX(CLK_MOUT_SPI1_ISP, "mout_spi1_isp", group_fimc_lclk_p, SRC_ISP,
+               8, 4),
+       MUX(CLK_MOUT_SPI0_ISP, "mout_spi0_isp", group_fimc_lclk_p, SRC_ISP,
+               4, 4),
+       MUX(CLK_MOUT_PWM_ISP, "mout_pwm_isp", group_fimc_lclk_p, SRC_ISP,
+               0, 4),
+
+       /* SRC_MAUDIO */
+       MUX(CLK_MOUT_AUDIO0, "mout_audio0", group_sclk_audio0_p, SRC_MAUDIO,
+               0, 4),
+
+       /* SRC_FSYS */
+       MUX(CLK_MOUT_TSADC, "mout_tsadc", group_sclk_p, SRC_FSYS, 28, 4),
+       MUX(CLK_MOUT_MMC2, "mout_mmc2", group_sclk_p, SRC_FSYS, 8, 4),
+       MUX(CLK_MOUT_MMC1, "mout_mmc1", group_sclk_p, SRC_FSYS, 4, 4),
+       MUX(CLK_MOUT_MMC0, "mout_mmc0", group_sclk_p, SRC_FSYS, 0, 4),
+
+       /* SRC_PERIL0 */
+       MUX(CLK_MOUT_UART3, "mout_uart3", group_sclk_p, SRC_PERIL0, 12, 4),
+       MUX(CLK_MOUT_UART2, "mout_uart2", group_sclk_p, SRC_PERIL0, 8, 4),
+       MUX(CLK_MOUT_UART1, "mout_uart1", group_sclk_p, SRC_PERIL0, 4, 4),
+       MUX(CLK_MOUT_UART0, "mout_uart0", group_sclk_p, SRC_PERIL0, 0, 4),
+
+       /* SRC_PERIL1 */
+       MUX(CLK_MOUT_SPI2, "mout_spi2", group_sclk_p, SRC_PERIL1, 24, 4),
+       MUX(CLK_MOUT_SPI1, "mout_spi1", group_sclk_p, SRC_PERIL1, 20, 4),
+       MUX(CLK_MOUT_SPI0, "mout_spi0", group_sclk_p, SRC_PERIL1, 16, 4),
+       MUX(CLK_MOUT_SPDIF, "mout_spdif", group_spdif_p, SRC_PERIL1, 8, 4),
+       MUX(CLK_MOUT_AUDIO2, "mout_audio2", group_sclk_audio2_p, SRC_PERIL1,
+               4, 4),
+       MUX(CLK_MOUT_AUDIO1, "mout_audio1", group_sclk_audio1_p, SRC_PERIL1,
+               0, 4),
+
+       /* SRC_CPU */
+       MUX(CLK_MOUT_MPLL_USER_C, "mout_mpll_user_c", mout_mpll_user_p,
+               SRC_CPU, 24, 1),
+       MUX(CLK_MOUT_HPM, "mout_hpm", mout_hpm_p, SRC_CPU, 20, 1),
+       MUX_F(CLK_MOUT_CORE, "mout_core", mout_core_p, SRC_CPU, 16, 1, 0,
+               CLK_MUX_READ_ONLY),
+       MUX_F(CLK_MOUT_APLL, "mout_apll", mout_apll_p, SRC_CPU, 0, 1,
+               CLK_SET_RATE_PARENT, 0),
+
+       /* SRC_CAM1 */
+       MUX(CLK_MOUT_PXLASYNC_CSIS1_FIMC, "mout_pxlasync_csis1",
+               group_fimc_lclk_p, SRC_CAM1, 20, 1),
+       MUX(CLK_MOUT_PXLASYNC_CSIS0_FIMC, "mout_pxlasync_csis0",
+               group_fimc_lclk_p, SRC_CAM1, 16, 1),
+       MUX(CLK_MOUT_JPEG, "mout_jpeg", mout_jpeg_p, SRC_CAM1, 8, 1),
+       MUX(CLK_MOUT_JPEG1, "mout_jpeg_1", mout_jpeg1_p, SRC_CAM1, 4, 1),
+       MUX(CLK_MOUT_JPEG0, "mout_jpeg_0", group_mout_mpll_user_t_p, SRC_CAM1,
+               0, 1),
+
+       /* SRC_TOP_ISP0 */
+       MUX(CLK_MOUT_ACLK_ISP0_300, "mout_aclk_isp0_300",
+               group_aclk_isp0_300_p, SRC_TOP_ISP0, 8, 1),
+       MUX(CLK_MOUT_ACLK_ISP0_400, "mout_aclk_isp0_400_user",
+               group_aclk_isp0_400_user_p, SRC_TOP_ISP0, 4, 1),
+       MUX(CLK_MOUT_ACLK_ISP0_300_USER, "mout_aclk_isp0_300_user",
+               group_aclk_isp0_300_user_p, SRC_TOP_ISP0, 0, 1),
+
+       /* SRC_TOP_ISP1 */
+       MUX(CLK_MOUT_ACLK_ISP1_300, "mout_aclk_isp1_300",
+               group_aclk_isp0_300_p, SRC_TOP_ISP1, 4, 1),
+       MUX(CLK_MOUT_ACLK_ISP1_300_USER, "mout_aclk_isp1_300_user",
+               group_aclk_isp1_300_user_p, SRC_TOP_ISP1, 0, 1),
+};
+
+static struct samsung_div_clock exynos4415_div_clks[] __initdata = {
+       /*
+        * NOTE: Following table is sorted by register address in ascending
+        * order and then bitfield shift in descending order, as it is done
+        * in the User's Manual. When adding new entries, please make sure
+        * that the order is preserved, to avoid merge conflicts and make
+        * further work with defined data easier.
+        */
+
+       /* DIV_LEFTBUS */
+       DIV(CLK_DIV_GPL, "div_gpl", "div_gdl", DIV_LEFTBUS, 4, 3),
+       DIV(CLK_DIV_GDL, "div_gdl", "mout_gdl", DIV_LEFTBUS, 0, 4),
+
+       /* DIV_RIGHTBUS */
+       DIV(CLK_DIV_GPR, "div_gpr", "div_gdr", DIV_RIGHTBUS, 4, 3),
+       DIV(CLK_DIV_GDR, "div_gdr", "mout_gdr", DIV_RIGHTBUS, 0, 4),
+
+       /* DIV_TOP */
+       DIV(CLK_DIV_ACLK_400_MCUISP, "div_aclk_400_mcuisp",
+               "mout_aclk_400_mcuisp", DIV_TOP, 24, 3),
+       DIV(CLK_DIV_EBI, "div_ebi", "mout_ebi_1", DIV_TOP, 16, 3),
+       DIV(CLK_DIV_ACLK_200, "div_aclk_200", "mout_aclk_200", DIV_TOP, 12, 3),
+       DIV(CLK_DIV_ACLK_160, "div_aclk_160", "mout_aclk_160", DIV_TOP, 8, 3),
+       DIV(CLK_DIV_ACLK_100, "div_aclk_100", "mout_aclk_100", DIV_TOP, 4, 4),
+       DIV(CLK_DIV_ACLK_266, "div_aclk_266", "mout_aclk_266", DIV_TOP, 0, 3),
+
+       /* DIV_CAM */
+       DIV(CLK_DIV_CSIS1, "div_csis1", "mout_csis1", DIV_CAM, 28, 4),
+       DIV(CLK_DIV_CSIS0, "div_csis0", "mout_csis0", DIV_CAM, 24, 4),
+       DIV(CLK_DIV_CAM1, "div_cam1", "mout_cam1", DIV_CAM, 20, 4),
+       DIV(CLK_DIV_FIMC3_LCLK, "div_fimc3_lclk", "mout_fimc3_lclk", DIV_CAM,
+               12, 4),
+       DIV(CLK_DIV_FIMC2_LCLK, "div_fimc2_lclk", "mout_fimc2_lclk", DIV_CAM,
+               8, 4),
+       DIV(CLK_DIV_FIMC1_LCLK, "div_fimc1_lclk", "mout_fimc1_lclk", DIV_CAM,
+               4, 4),
+       DIV(CLK_DIV_FIMC0_LCLK, "div_fimc0_lclk", "mout_fimc0_lclk", DIV_CAM,
+               0, 4),
+
+       /* DIV_TV */
+       DIV(CLK_DIV_TV_BLK, "div_tv_blk", "mout_g3d_pll", DIV_TV, 0, 4),
+
+       /* DIV_MFC */
+       DIV(CLK_DIV_MFC, "div_mfc", "mout_mfc", DIV_MFC, 0, 4),
+
+       /* DIV_G3D */
+       DIV(CLK_DIV_G3D, "div_g3d", "mout_g3d", DIV_G3D, 0, 4),
+
+       /* DIV_LCD */
+       DIV_F(CLK_DIV_MIPI0_PRE, "div_mipi0_pre", "div_mipi0", DIV_LCD, 20, 4,
+               CLK_SET_RATE_PARENT, 0),
+       DIV(CLK_DIV_MIPI0, "div_mipi0", "mout_mipi0", DIV_LCD, 16, 4),
+       DIV(CLK_DIV_FIMD0, "div_fimd0", "mout_fimd0", DIV_LCD, 0, 4),
+
+       /* DIV_ISP */
+       DIV(CLK_DIV_UART_ISP, "div_uart_isp", "mout_uart_isp", DIV_ISP, 28, 4),
+       DIV_F(CLK_DIV_SPI1_ISP_PRE, "div_spi1_isp_pre", "div_spi1_isp",
+               DIV_ISP, 20, 8, CLK_SET_RATE_PARENT, 0),
+       DIV(CLK_DIV_SPI1_ISP, "div_spi1_isp", "mout_spi1_isp", DIV_ISP, 16, 4),
+       DIV_F(CLK_DIV_SPI0_ISP_PRE, "div_spi0_isp_pre", "div_spi0_isp",
+               DIV_ISP, 8, 8, CLK_SET_RATE_PARENT, 0),
+       DIV(CLK_DIV_SPI0_ISP, "div_spi0_isp", "mout_spi0_isp", DIV_ISP, 4, 4),
+       DIV(CLK_DIV_PWM_ISP, "div_pwm_isp", "mout_pwm_isp", DIV_ISP, 0, 4),
+
+       /* DIV_MAUDIO */
+       DIV(CLK_DIV_PCM0, "div_pcm0", "div_audio0", DIV_MAUDIO, 4, 8),
+       DIV(CLK_DIV_AUDIO0, "div_audio0", "mout_audio0", DIV_MAUDIO, 0, 4),
+
+       /* DIV_FSYS0 */
+       DIV_F(CLK_DIV_TSADC_PRE, "div_tsadc_pre", "div_tsadc", DIV_FSYS0, 8, 8,
+               CLK_SET_RATE_PARENT, 0),
+       DIV(CLK_DIV_TSADC, "div_tsadc", "mout_tsadc", DIV_FSYS0, 0, 4),
+
+       /* DIV_FSYS1 */
+       DIV_F(CLK_DIV_MMC1_PRE, "div_mmc1_pre", "div_mmc1", DIV_FSYS1, 24, 8,
+               CLK_SET_RATE_PARENT, 0),
+       DIV(CLK_DIV_MMC1, "div_mmc1", "mout_mmc1", DIV_FSYS1, 16, 4),
+       DIV_F(CLK_DIV_MMC0_PRE, "div_mmc0_pre", "div_mmc0", DIV_FSYS1, 8, 8,
+               CLK_SET_RATE_PARENT, 0),
+       DIV(CLK_DIV_MMC0, "div_mmc0", "mout_mmc0", DIV_FSYS1, 0, 4),
+
+       /* DIV_FSYS2 */
+       DIV_F(CLK_DIV_MMC2_PRE, "div_mmc2_pre", "div_mmc2", DIV_FSYS2, 8, 8,
+               CLK_SET_RATE_PARENT, 0),
+       /*
+        * Fix: this entry previously reused the CLK_DIV_MMC2_PRE ID, so
+        * "div_mmc2" clobbered "div_mmc2_pre" in the provider's clock table
+        * and CLK_DIV_MMC2 consumers could never look up their clock.  Use
+        * CLK_DIV_MMC2, matching the MMC0/MMC1 pattern in DIV_FSYS1 above.
+        */
+       DIV_F(CLK_DIV_MMC2, "div_mmc2", "mout_mmc2", DIV_FSYS2, 0, 4,
+               CLK_SET_RATE_PARENT, 0),
+
+       /* DIV_PERIL0 */
+       DIV(CLK_DIV_UART3, "div_uart3", "mout_uart3", DIV_PERIL0, 12, 4),
+       DIV(CLK_DIV_UART2, "div_uart2", "mout_uart2", DIV_PERIL0, 8, 4),
+       DIV(CLK_DIV_UART1, "div_uart1", "mout_uart1", DIV_PERIL0, 4, 4),
+       DIV(CLK_DIV_UART0, "div_uart0", "mout_uart0", DIV_PERIL0, 0, 4),
+
+       /* DIV_PERIL1 */
+       DIV_F(CLK_DIV_SPI1_PRE, "div_spi1_pre", "div_spi1", DIV_PERIL1, 24, 8,
+               CLK_SET_RATE_PARENT, 0),
+       DIV(CLK_DIV_SPI1, "div_spi1", "mout_spi1", DIV_PERIL1, 16, 4),
+       DIV_F(CLK_DIV_SPI0_PRE, "div_spi0_pre", "div_spi0", DIV_PERIL1, 8, 8,
+               CLK_SET_RATE_PARENT, 0),
+       DIV(CLK_DIV_SPI0, "div_spi0", "mout_spi0", DIV_PERIL1, 0, 4),
+
+       /* DIV_PERIL2 */
+       DIV_F(CLK_DIV_SPI2_PRE, "div_spi2_pre", "div_spi2", DIV_PERIL2, 8, 8,
+               CLK_SET_RATE_PARENT, 0),
+       DIV(CLK_DIV_SPI2, "div_spi2", "mout_spi2", DIV_PERIL2, 0, 4),
+
+       /* DIV_PERIL4 */
+       DIV(CLK_DIV_PCM2, "div_pcm2", "div_audio2", DIV_PERIL4, 20, 8),
+       DIV(CLK_DIV_AUDIO2, "div_audio2", "mout_audio2", DIV_PERIL4, 16, 4),
+       /*
+        * Fix: "div_pcm1" was declared at shift 20, the exact same bitfield
+        * already claimed by "div_pcm2" above -- two divider clocks driving
+        * one register field.  Per this table's descending-shift sort rule
+        * and the PCM2(20)/AUDIO2(16) layout, PCM1 belongs at shift 4, just
+        * above "div_audio1" at shift 0.
+        */
+       DIV(CLK_DIV_PCM1, "div_pcm1", "div_audio1", DIV_PERIL4, 4, 8),
+       DIV(CLK_DIV_AUDIO1, "div_audio1", "mout_audio1", DIV_PERIL4, 0, 4),
+
+       /* DIV_PERIL5 */
+       DIV(CLK_DIV_I2S1, "div_i2s1", "div_audio1", DIV_PERIL5, 0, 6),
+
+       /* DIV_CAM1 */
+       DIV(CLK_DIV_PXLASYNC_CSIS1_FIMC, "div_pxlasync_csis1_fimc",
+               "mout_pxlasync_csis1", DIV_CAM1, 24, 4),
+       DIV(CLK_DIV_PXLASYNC_CSIS0_FIMC, "div_pxlasync_csis0_fimc",
+               "mout_pxlasync_csis0", DIV_CAM1, 20, 4),
+       DIV(CLK_DIV_JPEG, "div_jpeg", "mout_jpeg", DIV_CAM1, 0, 4),
+
+       /* DIV_CPU0 */
+       DIV(CLK_DIV_CORE2, "div_core2", "div_core", DIV_CPU0, 28, 3),
+       DIV_F(CLK_DIV_APLL, "div_apll", "mout_apll", DIV_CPU0, 24, 3,
+                       CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+       DIV(CLK_DIV_PCLK_DBG, "div_pclk_dbg", "div_core2", DIV_CPU0, 20, 3),
+       DIV(CLK_DIV_ATB, "div_atb", "div_core2", DIV_CPU0, 16, 3),
+       DIV(CLK_DIV_PERIPH, "div_periph", "div_core2", DIV_CPU0, 12, 3),
+       DIV(CLK_DIV_COREM1, "div_corem1", "div_core2", DIV_CPU0, 8, 3),
+       DIV(CLK_DIV_COREM0, "div_corem0", "div_core2", DIV_CPU0, 4, 3),
+       DIV_F(CLK_DIV_CORE, "div_core", "mout_core", DIV_CPU0, 0, 3,
+               CLK_GET_RATE_NOCACHE, CLK_DIVIDER_READ_ONLY),
+
+       /* DIV_CPU1 */
+       DIV(CLK_DIV_HPM, "div_hpm", "div_copy", DIV_CPU1, 4, 3),
+       DIV(CLK_DIV_COPY, "div_copy", "mout_hpm", DIV_CPU1, 0, 3),
+};
+
+static struct samsung_gate_clock exynos4415_gate_clks[] __initdata = {
+       /*
+        * NOTE: Following table is sorted by register address in ascending
+        * order and then bitfield shift in descending order, as it is done
+        * in the User's Manual. When adding new entries, please make sure
+        * that the order is preserved, to avoid merge conflicts and make
+        * further work with defined data easier.
+        */
+
+       /*
+        * Entry layout appears to be GATE(id, name, parent_name, reg_offset,
+        * bit, clk_flags, gate_flags), judging from consistent usage below --
+        * TODO confirm against the samsung clk framework's GATE() macro.
+        */
+
+       /* GATE_IP_LEFTBUS */
+       GATE(CLK_ASYNC_G3D, "async_g3d", "div_aclk_100", GATE_IP_LEFTBUS, 6,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_ASYNC_MFCL, "async_mfcl", "div_aclk_100", GATE_IP_LEFTBUS, 4,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_ASYNC_TVX, "async_tvx", "div_aclk_100", GATE_IP_LEFTBUS, 3,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_PPMULEFT, "ppmuleft", "div_aclk_100", GATE_IP_LEFTBUS, 1,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_GPIO_LEFT, "gpio_left", "div_aclk_100", GATE_IP_LEFTBUS, 0,
+               CLK_IGNORE_UNUSED, 0),
+
+       /* GATE_IP_IMAGE */
+       GATE(CLK_PPMUIMAGE, "ppmuimage", "div_aclk_100", GATE_IP_IMAGE,
+               9, 0, 0),
+       GATE(CLK_QEMDMA2, "qe_mdma2", "div_aclk_100", GATE_IP_IMAGE,
+               8, 0, 0),
+       GATE(CLK_QEROTATOR, "qe_rotator", "div_aclk_100", GATE_IP_IMAGE,
+               7, 0, 0),
+       /*
+        * NOTE(review): "smmu_mdam2" looks like a typo for "smmu_mdma2",
+        * but the name is a runtime lookup string, so it is deliberately
+        * kept as-is -- renaming it could break existing consumers.
+        */
+       GATE(CLK_SMMUMDMA2, "smmu_mdam2", "div_aclk_100", GATE_IP_IMAGE,
+               5, 0, 0),
+       GATE(CLK_SMMUROTATOR, "smmu_rotator", "div_aclk_100", GATE_IP_IMAGE,
+               4, 0, 0),
+       GATE(CLK_MDMA2, "mdma2", "div_aclk_100", GATE_IP_IMAGE, 2, 0, 0),
+       GATE(CLK_ROTATOR, "rotator", "div_aclk_100", GATE_IP_IMAGE, 1, 0, 0),
+
+       /* GATE_IP_RIGHTBUS */
+       GATE(CLK_ASYNC_ISPMX, "async_ispmx", "div_aclk_100",
+               GATE_IP_RIGHTBUS, 9, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_ASYNC_MAUDIOX, "async_maudiox", "div_aclk_100",
+               GATE_IP_RIGHTBUS, 7, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_ASYNC_MFCR, "async_mfcr", "div_aclk_100",
+               GATE_IP_RIGHTBUS, 6, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_ASYNC_FSYSD, "async_fsysd", "div_aclk_100",
+               GATE_IP_RIGHTBUS, 5, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_ASYNC_LCD0X, "async_lcd0x", "div_aclk_100",
+               GATE_IP_RIGHTBUS, 3, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_ASYNC_CAMX, "async_camx", "div_aclk_100",
+               GATE_IP_RIGHTBUS, 2, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_PPMURIGHT, "ppmuright", "div_aclk_100",
+               GATE_IP_RIGHTBUS, 1, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_GPIO_RIGHT, "gpio_right", "div_aclk_100",
+               GATE_IP_RIGHTBUS, 0, CLK_IGNORE_UNUSED, 0),
+
+       /* GATE_IP_PERIR */
+       GATE(CLK_ANTIRBK_APBIF, "antirbk_apbif", "div_aclk_100",
+               GATE_IP_PERIR, 24, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_EFUSE_WRITER_APBIF, "efuse_writer_apbif", "div_aclk_100",
+               GATE_IP_PERIR, 23, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_MONOCNT, "monocnt", "div_aclk_100", GATE_IP_PERIR, 22,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_TZPC6, "tzpc6", "div_aclk_100", GATE_IP_PERIR, 21,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_PROVISIONKEY1, "provisionkey1", "div_aclk_100",
+               GATE_IP_PERIR, 20, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_PROVISIONKEY0, "provisionkey0", "div_aclk_100",
+               GATE_IP_PERIR, 19, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_CMU_ISPPART, "cmu_isppart", "div_aclk_100", GATE_IP_PERIR, 18,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_TMU_APBIF, "tmu_apbif", "div_aclk_100",
+               GATE_IP_PERIR, 17, 0, 0),
+       GATE(CLK_KEYIF, "keyif", "div_aclk_100", GATE_IP_PERIR, 16, 0, 0),
+       GATE(CLK_RTC, "rtc", "div_aclk_100", GATE_IP_PERIR, 15, 0, 0),
+       GATE(CLK_WDT, "wdt", "div_aclk_100", GATE_IP_PERIR, 14, 0, 0),
+       GATE(CLK_MCT, "mct", "div_aclk_100", GATE_IP_PERIR, 13, 0, 0),
+       GATE(CLK_SECKEY, "seckey", "div_aclk_100", GATE_IP_PERIR, 12,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_HDMI_CEC, "hdmi_cec", "div_aclk_100", GATE_IP_PERIR, 11,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_TZPC5, "tzpc5", "div_aclk_100", GATE_IP_PERIR, 10,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_TZPC4, "tzpc4", "div_aclk_100", GATE_IP_PERIR, 9,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_TZPC3, "tzpc3", "div_aclk_100", GATE_IP_PERIR, 8,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_TZPC2, "tzpc2", "div_aclk_100", GATE_IP_PERIR, 7,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_TZPC1, "tzpc1", "div_aclk_100", GATE_IP_PERIR, 6,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_TZPC0, "tzpc0", "div_aclk_100", GATE_IP_PERIR, 5,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_CMU_COREPART, "cmu_corepart", "div_aclk_100", GATE_IP_PERIR, 4,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_CMU_TOPPART, "cmu_toppart", "div_aclk_100", GATE_IP_PERIR, 3,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_PMU_APBIF, "pmu_apbif", "div_aclk_100", GATE_IP_PERIR, 2,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_SYSREG, "sysreg", "div_aclk_100", GATE_IP_PERIR, 1,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_CHIP_ID, "chip_id", "div_aclk_100", GATE_IP_PERIR, 0,
+               CLK_IGNORE_UNUSED, 0),
+
+       /* GATE_SCLK_CAM - incomplete: not all gates of this register listed */
+       GATE(CLK_SCLK_PXLAYSNC_CSIS1_FIMC, "sclk_pxlasync_csis1_fimc",
+               "div_pxlasync_csis1_fimc", GATE_SCLK_CAM, 11,
+               CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_PXLAYSNC_CSIS0_FIMC, "sclk_pxlasync_csis0_fimc",
+               "div_pxlasync_csis0_fimc", GATE_SCLK_CAM,
+               10, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_JPEG, "sclk_jpeg", "div_jpeg",
+               GATE_SCLK_CAM, 8, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_CSIS1, "sclk_csis1", "div_csis1",
+               GATE_SCLK_CAM, 7, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_CSIS0, "sclk_csis0", "div_csis0",
+               GATE_SCLK_CAM, 6, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_CAM1, "sclk_cam1", "div_cam1",
+               GATE_SCLK_CAM, 5, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_FIMC3_LCLK, "sclk_fimc3_lclk", "div_fimc3_lclk",
+               GATE_SCLK_CAM, 3, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_FIMC2_LCLK, "sclk_fimc2_lclk", "div_fimc2_lclk",
+               GATE_SCLK_CAM, 2, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_FIMC1_LCLK, "sclk_fimc1_lclk", "div_fimc1_lclk",
+               GATE_SCLK_CAM, 1, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_FIMC0_LCLK, "sclk_fimc0_lclk", "div_fimc0_lclk",
+               GATE_SCLK_CAM, 0, CLK_SET_RATE_PARENT, 0),
+
+       /* GATE_SCLK_TV */
+       GATE(CLK_SCLK_PIXEL, "sclk_pixel", "div_tv_blk",
+               GATE_SCLK_TV, 3, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_HDMI, "sclk_hdmi", "mout_hdmi",
+               GATE_SCLK_TV, 2, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_MIXER, "sclk_mixer", "div_tv_blk",
+               GATE_SCLK_TV, 0, CLK_SET_RATE_PARENT, 0),
+
+       /* GATE_SCLK_MFC */
+       GATE(CLK_SCLK_MFC, "sclk_mfc", "div_mfc",
+               GATE_SCLK_MFC, 0, CLK_SET_RATE_PARENT, 0),
+
+       /* GATE_SCLK_G3D */
+       GATE(CLK_SCLK_G3D, "sclk_g3d", "div_g3d",
+               GATE_SCLK_G3D, 0, CLK_SET_RATE_PARENT, 0),
+
+       /* GATE_SCLK_LCD */
+       GATE(CLK_SCLK_MIPIDPHY4L, "sclk_mipidphy4l", "div_mipi0",
+               GATE_SCLK_LCD, 4, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_MIPI0, "sclk_mipi0", "div_mipi0_pre",
+               GATE_SCLK_LCD, 3, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_MDNIE0, "sclk_mdnie0", "div_fimd0",
+               GATE_SCLK_LCD, 1, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_FIMD0, "sclk_fimd0", "div_fimd0",
+               GATE_SCLK_LCD, 0, CLK_SET_RATE_PARENT, 0),
+
+       /* GATE_SCLK_MAUDIO */
+       GATE(CLK_SCLK_PCM0, "sclk_pcm0", "div_pcm0",
+               GATE_SCLK_MAUDIO, 1, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_AUDIO0, "sclk_audio0", "div_audio0",
+               GATE_SCLK_MAUDIO, 0, CLK_SET_RATE_PARENT, 0),
+
+       /* GATE_SCLK_FSYS */
+       GATE(CLK_SCLK_TSADC, "sclk_tsadc", "div_tsadc_pre",
+               GATE_SCLK_FSYS, 9, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_EBI, "sclk_ebi", "div_ebi",
+               GATE_SCLK_FSYS, 6, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_MMC2, "sclk_mmc2", "div_mmc2_pre",
+               GATE_SCLK_FSYS, 2, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_MMC1, "sclk_mmc1", "div_mmc1_pre",
+               GATE_SCLK_FSYS, 1, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_MMC0, "sclk_mmc0", "div_mmc0_pre",
+               GATE_SCLK_FSYS, 0, CLK_SET_RATE_PARENT, 0),
+
+       /* GATE_SCLK_PERIL */
+       GATE(CLK_SCLK_I2S, "sclk_i2s1", "div_i2s1",
+               GATE_SCLK_PERIL, 18, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_PCM2, "sclk_pcm2", "div_pcm2",
+               GATE_SCLK_PERIL, 16, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_PCM1, "sclk_pcm1", "div_pcm1",
+               GATE_SCLK_PERIL, 15, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_AUDIO2, "sclk_audio2", "div_audio2",
+               GATE_SCLK_PERIL, 14, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_AUDIO1, "sclk_audio1", "div_audio1",
+               GATE_SCLK_PERIL, 13, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_SPDIF, "sclk_spdif", "mout_spdif",
+               GATE_SCLK_PERIL, 10, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_SPI2, "sclk_spi2", "div_spi2_pre",
+               GATE_SCLK_PERIL, 8, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_SPI1, "sclk_spi1", "div_spi1_pre",
+               GATE_SCLK_PERIL, 7, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_SPI0, "sclk_spi0", "div_spi0_pre",
+               GATE_SCLK_PERIL, 6, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_UART3, "sclk_uart3", "div_uart3",
+               GATE_SCLK_PERIL, 3, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_UART2, "sclk_uart2", "div_uart2",
+               GATE_SCLK_PERIL, 2, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_UART1, "sclk_uart1", "div_uart1",
+               GATE_SCLK_PERIL, 1, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_UART0, "sclk_uart0", "div_uart0",
+               GATE_SCLK_PERIL, 0, CLK_SET_RATE_PARENT, 0),
+
+       /* GATE_IP_CAM */
+       GATE(CLK_SMMUFIMC_LITE2, "smmufimc_lite2", "div_aclk_160", GATE_IP_CAM,
+               22, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_FIMC_LITE2, "fimc_lite2", "div_aclk_160", GATE_IP_CAM,
+               20, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_PIXELASYNCM1, "pixelasyncm1", "div_aclk_160", GATE_IP_CAM,
+               18, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_PIXELASYNCM0, "pixelasyncm0", "div_aclk_160", GATE_IP_CAM,
+               17, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_PPMUCAMIF, "ppmucamif", "div_aclk_160", GATE_IP_CAM,
+               16, CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_SMMUJPEG, "smmujpeg", "div_aclk_160", GATE_IP_CAM, 11, 0, 0),
+       GATE(CLK_SMMUFIMC3, "smmufimc3", "div_aclk_160", GATE_IP_CAM, 10, 0, 0),
+       GATE(CLK_SMMUFIMC2, "smmufimc2", "div_aclk_160", GATE_IP_CAM, 9, 0, 0),
+       GATE(CLK_SMMUFIMC1, "smmufimc1", "div_aclk_160", GATE_IP_CAM, 8, 0, 0),
+       GATE(CLK_SMMUFIMC0, "smmufimc0", "div_aclk_160", GATE_IP_CAM, 7, 0, 0),
+       GATE(CLK_JPEG, "jpeg", "div_aclk_160", GATE_IP_CAM, 6, 0, 0),
+       GATE(CLK_CSIS1, "csis1", "div_aclk_160", GATE_IP_CAM, 5, 0, 0),
+       GATE(CLK_CSIS0, "csis0", "div_aclk_160", GATE_IP_CAM, 4, 0, 0),
+       GATE(CLK_FIMC3, "fimc3", "div_aclk_160", GATE_IP_CAM, 3, 0, 0),
+       GATE(CLK_FIMC2, "fimc2", "div_aclk_160", GATE_IP_CAM, 2, 0, 0),
+       GATE(CLK_FIMC1, "fimc1", "div_aclk_160", GATE_IP_CAM, 1, 0, 0),
+       GATE(CLK_FIMC0, "fimc0", "div_aclk_160", GATE_IP_CAM, 0, 0, 0),
+
+       /* GATE_IP_TV */
+       GATE(CLK_PPMUTV, "ppmutv", "div_aclk_100", GATE_IP_TV, 5, 0, 0),
+       GATE(CLK_SMMUTV, "smmutv", "div_aclk_100", GATE_IP_TV, 4, 0, 0),
+       GATE(CLK_HDMI, "hdmi", "div_aclk_100", GATE_IP_TV, 3, 0, 0),
+       GATE(CLK_MIXER, "mixer", "div_aclk_100", GATE_IP_TV, 1, 0, 0),
+       GATE(CLK_VP, "vp", "div_aclk_100", GATE_IP_TV, 0, 0, 0),
+
+       /* GATE_IP_MFC */
+       GATE(CLK_PPMUMFC_R, "ppmumfc_r", "div_aclk_200", GATE_IP_MFC, 4,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_PPMUMFC_L, "ppmumfc_l", "div_aclk_200", GATE_IP_MFC, 3,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_SMMUMFC_R, "smmumfc_r", "div_aclk_200", GATE_IP_MFC, 2, 0, 0),
+       GATE(CLK_SMMUMFC_L, "smmumfc_l", "div_aclk_200", GATE_IP_MFC, 1, 0, 0),
+       GATE(CLK_MFC, "mfc", "div_aclk_200", GATE_IP_MFC, 0, 0, 0),
+
+       /* GATE_IP_G3D */
+       GATE(CLK_PPMUG3D, "ppmug3d", "div_aclk_200", GATE_IP_G3D, 1,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_G3D, "g3d", "div_aclk_200", GATE_IP_G3D, 0, 0, 0),
+
+       /* GATE_IP_LCD */
+       GATE(CLK_PPMULCD0, "ppmulcd0", "div_aclk_160", GATE_IP_LCD, 5,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_SMMUFIMD0, "smmufimd0", "div_aclk_160", GATE_IP_LCD, 4, 0, 0),
+       GATE(CLK_DSIM0, "dsim0", "div_aclk_160", GATE_IP_LCD, 3, 0, 0),
+       GATE(CLK_SMIES, "smies", "div_aclk_160", GATE_IP_LCD, 2, 0, 0),
+       GATE(CLK_MIE0, "mie0", "div_aclk_160", GATE_IP_LCD, 1, 0, 0),
+       GATE(CLK_FIMD0, "fimd0", "div_aclk_160", GATE_IP_LCD, 0, 0, 0),
+
+       /* GATE_IP_FSYS */
+       GATE(CLK_TSADC, "tsadc", "div_aclk_200", GATE_IP_FSYS, 20, 0, 0),
+       GATE(CLK_PPMUFILE, "ppmufile", "div_aclk_200", GATE_IP_FSYS, 17,
+               CLK_IGNORE_UNUSED, 0),
+       GATE(CLK_NFCON, "nfcon", "div_aclk_200", GATE_IP_FSYS, 16, 0, 0),
+       GATE(CLK_USBDEVICE, "usbdevice", "div_aclk_200", GATE_IP_FSYS, 13,
+               0, 0),
+       GATE(CLK_USBHOST, "usbhost", "div_aclk_200", GATE_IP_FSYS, 12, 0, 0),
+       GATE(CLK_SROMC, "sromc", "div_aclk_200", GATE_IP_FSYS, 11, 0, 0),
+       GATE(CLK_SDMMC2, "sdmmc2", "div_aclk_200", GATE_IP_FSYS, 7, 0, 0),
+       GATE(CLK_SDMMC1, "sdmmc1", "div_aclk_200", GATE_IP_FSYS, 6, 0, 0),
+       GATE(CLK_SDMMC0, "sdmmc0", "div_aclk_200", GATE_IP_FSYS, 5, 0, 0),
+       GATE(CLK_PDMA1, "pdma1", "div_aclk_200", GATE_IP_FSYS, 1, 0, 0),
+       GATE(CLK_PDMA0, "pdma0", "div_aclk_200", GATE_IP_FSYS, 0, 0, 0),
+
+       /* GATE_IP_PERIL */
+       GATE(CLK_SPDIF, "spdif", "div_aclk_100", GATE_IP_PERIL, 26, 0, 0),
+       GATE(CLK_PWM, "pwm", "div_aclk_100", GATE_IP_PERIL, 24, 0, 0),
+       GATE(CLK_PCM2, "pcm2", "div_aclk_100", GATE_IP_PERIL, 23, 0, 0),
+       GATE(CLK_PCM1, "pcm1", "div_aclk_100", GATE_IP_PERIL, 22, 0, 0),
+       GATE(CLK_I2S1, "i2s1", "div_aclk_100", GATE_IP_PERIL, 20, 0, 0),
+       GATE(CLK_SPI2, "spi2", "div_aclk_100", GATE_IP_PERIL, 18, 0, 0),
+       GATE(CLK_SPI1, "spi1", "div_aclk_100", GATE_IP_PERIL, 17, 0, 0),
+       GATE(CLK_SPI0, "spi0", "div_aclk_100", GATE_IP_PERIL, 16, 0, 0),
+       GATE(CLK_I2CHDMI, "i2chdmi", "div_aclk_100", GATE_IP_PERIL, 14, 0, 0),
+       GATE(CLK_I2C7, "i2c7", "div_aclk_100", GATE_IP_PERIL, 13, 0, 0),
+       GATE(CLK_I2C6, "i2c6", "div_aclk_100", GATE_IP_PERIL, 12, 0, 0),
+       GATE(CLK_I2C5, "i2c5", "div_aclk_100", GATE_IP_PERIL, 11, 0, 0),
+       GATE(CLK_I2C4, "i2c4", "div_aclk_100", GATE_IP_PERIL, 10, 0, 0),
+       GATE(CLK_I2C3, "i2c3", "div_aclk_100", GATE_IP_PERIL, 9, 0, 0),
+       GATE(CLK_I2C2, "i2c2", "div_aclk_100", GATE_IP_PERIL, 8, 0, 0),
+       GATE(CLK_I2C1, "i2c1", "div_aclk_100", GATE_IP_PERIL, 7, 0, 0),
+       GATE(CLK_I2C0, "i2c0", "div_aclk_100", GATE_IP_PERIL, 6, 0, 0),
+       GATE(CLK_UART3, "uart3", "div_aclk_100", GATE_IP_PERIL, 3, 0, 0),
+       GATE(CLK_UART2, "uart2", "div_aclk_100", GATE_IP_PERIL, 2, 0, 0),
+       GATE(CLK_UART1, "uart1", "div_aclk_100", GATE_IP_PERIL, 1, 0, 0),
+       GATE(CLK_UART0, "uart0", "div_aclk_100", GATE_IP_PERIL, 0, 0, 0),
+};
+
+/*
+ * APLL & MPLL & BPLL & ISP_PLL & DISP_PLL & G3D_PLL
+ */
+static struct samsung_pll_rate_table exynos4415_pll_rates[] = {
+       /*
+        * PLL_35XX_RATE(rate, m, p, s): the (m, p, s) triplets below are
+        * consistent with fout = fin * m / (p << s) assuming a 24 MHz fin --
+        * TODO confirm against the PLL35xx section of the User's Manual.
+        */
+       PLL_35XX_RATE(1600000000, 400, 3,  1),
+       PLL_35XX_RATE(1500000000, 250, 2,  1),
+       PLL_35XX_RATE(1400000000, 175, 3,  0),
+       PLL_35XX_RATE(1300000000, 325, 3,  1),
+       PLL_35XX_RATE(1200000000, 400, 4,  1),
+       PLL_35XX_RATE(1100000000, 275, 3,  1),
+       PLL_35XX_RATE(1066000000, 533, 6,  1),
+       PLL_35XX_RATE(1000000000, 250, 3,  1),
+       PLL_35XX_RATE(960000000,  320, 4,  1),
+       PLL_35XX_RATE(900000000,  300, 4,  1),
+       PLL_35XX_RATE(850000000,  425, 6,  1),
+       PLL_35XX_RATE(800000000,  200, 3,  1),
+       PLL_35XX_RATE(700000000,  175, 3,  1),
+       PLL_35XX_RATE(667000000,  667, 12, 1),
+       PLL_35XX_RATE(600000000,  400, 4,  2),
+       PLL_35XX_RATE(550000000,  275, 3,  2),
+       PLL_35XX_RATE(533000000,  533, 6,  2),
+       PLL_35XX_RATE(520000000,  260, 3,  2),
+       PLL_35XX_RATE(500000000,  250, 3,  2),
+       PLL_35XX_RATE(440000000,  220, 3,  2),
+       PLL_35XX_RATE(400000000,  200, 3,  2),
+       PLL_35XX_RATE(350000000,  175, 3,  2),
+       PLL_35XX_RATE(300000000,  300, 3,  3),
+       PLL_35XX_RATE(266000000,  266, 3,  3),
+       PLL_35XX_RATE(200000000,  200, 3,  3),
+       PLL_35XX_RATE(160000000,  160, 3,  3),
+       PLL_35XX_RATE(100000000,  200, 3,  4),
+       { /* sentinel */ }
+};
+
+/* EPLL */
+/* EPLL */
+static struct samsung_pll_rate_table exynos4415_epll_rates[] = {
+       /*
+        * PLL_36XX_RATE(rate, m, p, s, k): same layout as the 35xx table plus
+        * a trailing fractional field (k) used for the non-integer audio
+        * rates below -- TODO confirm against the PLL36xx documentation.
+        */
+       PLL_36XX_RATE(800000000, 200, 3, 1,     0),
+       PLL_36XX_RATE(288000000,  96, 2, 2,     0),
+       PLL_36XX_RATE(192000000, 128, 2, 3,     0),
+       PLL_36XX_RATE(144000000,  96, 2, 3,     0),
+       PLL_36XX_RATE(96000000,  128, 2, 4,     0),
+       PLL_36XX_RATE(84000000,  112, 2, 4,     0),
+       PLL_36XX_RATE(80750011,  107, 2, 4, 43691),
+       PLL_36XX_RATE(73728004,   98, 2, 4, 19923),
+       PLL_36XX_RATE(67987602,  271, 3, 5, 62285),
+       PLL_36XX_RATE(65911004,  175, 2, 5, 49982),
+       PLL_36XX_RATE(50000000,  200, 3, 5,     0),
+       PLL_36XX_RATE(49152003,  131, 2, 5,  4719),
+       PLL_36XX_RATE(48000000,  128, 2, 5,     0),
+       PLL_36XX_RATE(45250000,  181, 3, 5,     0),
+       { /* sentinel */ }
+};
+
+/*
+ * Main-CMU PLL instances, indexed by the (out-of-view) pll enum.  Rate
+ * tables are NULL here and attached in exynos4415_cmu_init() before
+ * registration.  Note G3D_PLL's parent is "mout_g3d_pllsrc", not "fin_pll".
+ */
+static struct samsung_pll_clock exynos4415_plls[nr_plls] __initdata = {
+       [apll] = PLL(pll_35xx, CLK_FOUT_APLL, "fout_apll", "fin_pll",
+                       APLL_LOCK, APLL_CON0, NULL),
+       [epll] = PLL(pll_36xx, CLK_FOUT_EPLL, "fout_epll", "fin_pll",
+                       EPLL_LOCK, EPLL_CON0, NULL),
+       [g3d_pll] = PLL(pll_35xx, CLK_FOUT_G3D_PLL, "fout_g3d_pll",
+                       "mout_g3d_pllsrc", G3D_PLL_LOCK, G3D_PLL_CON0, NULL),
+       [isp_pll] = PLL(pll_35xx, CLK_FOUT_ISP_PLL, "fout_isp_pll", "fin_pll",
+                       ISP_PLL_LOCK, ISP_PLL_CON0, NULL),
+       [disp_pll] = PLL(pll_35xx, CLK_FOUT_DISP_PLL, "fout_disp_pll",
+                       "fin_pll", DISP_PLL_LOCK, DISP_PLL_CON0, NULL),
+};
+
+/*
+ * Probe the main Exynos4415 CMU: map the register block, attach PLL rate
+ * tables, register every clock table defined above and publish the result
+ * as a DT clock provider.  Invoked very early via CLK_OF_DECLARE, before
+ * normal driver error handling is possible, hence panic() on failure.
+ */
+static void __init exynos4415_cmu_init(struct device_node *np)
+{
+       void __iomem *reg_base;
+
+       reg_base = of_iomap(np, 0);
+       if (!reg_base)
+               panic("%s: failed to map registers\n", __func__);
+
+       exynos4415_ctx = samsung_clk_init(np, reg_base, CLK_NR_CLKS);
+       if (!exynos4415_ctx)
+               panic("%s: unable to allocate context.\n", __func__);
+
+       /* All 35xx-type PLLs share one rate table; EPLL (36xx) has its own. */
+       exynos4415_plls[apll].rate_table = exynos4415_pll_rates;
+       exynos4415_plls[epll].rate_table = exynos4415_epll_rates;
+       exynos4415_plls[g3d_pll].rate_table = exynos4415_pll_rates;
+       exynos4415_plls[isp_pll].rate_table = exynos4415_pll_rates;
+       exynos4415_plls[disp_pll].rate_table = exynos4415_pll_rates;
+
+       /*
+        * Registration order kept as written -- presumably relevant for
+        * parent lookup in the samsung clk framework; do not reorder without
+        * checking.
+        */
+       samsung_clk_register_fixed_factor(exynos4415_ctx,
+                               exynos4415_fixed_factor_clks,
+                               ARRAY_SIZE(exynos4415_fixed_factor_clks));
+       samsung_clk_register_fixed_rate(exynos4415_ctx,
+                               exynos4415_fixed_rate_clks,
+                               ARRAY_SIZE(exynos4415_fixed_rate_clks));
+
+       samsung_clk_register_pll(exynos4415_ctx, exynos4415_plls,
+                               ARRAY_SIZE(exynos4415_plls), reg_base);
+       samsung_clk_register_mux(exynos4415_ctx, exynos4415_mux_clks,
+                               ARRAY_SIZE(exynos4415_mux_clks));
+       samsung_clk_register_div(exynos4415_ctx, exynos4415_div_clks,
+                               ARRAY_SIZE(exynos4415_div_clks));
+       samsung_clk_register_gate(exynos4415_ctx, exynos4415_gate_clks,
+                               ARRAY_SIZE(exynos4415_gate_clks));
+
+       /* Presumably mirrors the DMC variant below: hooks syscore suspend/
+        * resume register save/restore -- defined earlier, out of view. */
+       exynos4415_clk_sleep_init();
+
+       samsung_clk_of_add_provider(np, exynos4415_ctx);
+}
+CLK_OF_DECLARE(exynos4415_cmu, "samsung,exynos4415-cmu", exynos4415_cmu_init);
+
+/*
+ * CMU DMC
+ */
+
+/* Register offsets, relative to the CMU_DMC base mapped at probe time. */
+#define MPLL_LOCK              0x008
+#define MPLL_CON0              0x108
+#define MPLL_CON1              0x10c
+#define MPLL_CON2              0x110
+#define BPLL_LOCK              0x118
+#define BPLL_CON0              0x218
+#define BPLL_CON1              0x21c
+#define BPLL_CON2              0x220
+#define SRC_DMC                        0x300
+#define DIV_DMC1               0x504
+
+/* Indices into exynos4415_dmc_plls[]; nr_dmc_plls sizes the array. */
+enum exynos4415_dmc_plls {
+       mpll, bpll,
+       nr_dmc_plls,
+};
+
+/* Clock provider context for the DMC CMU, set up in exynos4415_cmu_dmc_init(). */
+static struct samsung_clk_provider *exynos4415_dmc_ctx;
+
+#ifdef CONFIG_PM_SLEEP
+/* Runtime save area for the registers listed below; allocated in sleep_init. */
+static struct samsung_clk_reg_dump *exynos4415_dmc_clk_regs;
+
+/* DMC CMU registers whose contents must survive suspend/resume. */
+static unsigned long exynos4415_cmu_dmc_clk_regs[] __initdata = {
+       MPLL_LOCK,
+       MPLL_CON0,
+       MPLL_CON1,
+       MPLL_CON2,
+       BPLL_LOCK,
+       BPLL_CON0,
+       BPLL_CON1,
+       BPLL_CON2,
+       SRC_DMC,
+       DIV_DMC1,
+};
+
+/*
+ * syscore suspend hook: snapshot the DMC CMU registers into the
+ * pre-allocated dump.  ARRAY_SIZE() of the __initdata list is a
+ * compile-time constant, so referencing that name here is safe even
+ * after init memory has been freed.
+ */
+static int exynos4415_dmc_clk_suspend(void)
+{
+       samsung_clk_save(exynos4415_dmc_ctx->reg_base,
+                               exynos4415_dmc_clk_regs,
+                               ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
+       return 0;
+}
+
+/* syscore resume hook: write the saved DMC CMU register values back. */
+static void exynos4415_dmc_clk_resume(void)
+{
+       samsung_clk_restore(exynos4415_dmc_ctx->reg_base,
+                               exynos4415_dmc_clk_regs,
+                               ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
+}
+
+/* Suspend/resume callbacks registered with the syscore framework. */
+static struct syscore_ops exynos4415_dmc_clk_syscore_ops = {
+       .suspend = exynos4415_dmc_clk_suspend,
+       .resume = exynos4415_dmc_clk_resume,
+};
+
+/*
+ * Allocate the register save area and hook into syscore suspend/resume.
+ * On allocation failure only a warning is printed and suspend/resume of
+ * these registers is silently skipped.  Compiles to a no-op stub when
+ * CONFIG_PM_SLEEP is disabled.
+ */
+static void exynos4415_dmc_clk_sleep_init(void)
+{
+       exynos4415_dmc_clk_regs =
+               samsung_clk_alloc_reg_dump(exynos4415_cmu_dmc_clk_regs,
+                               ARRAY_SIZE(exynos4415_cmu_dmc_clk_regs));
+       if (!exynos4415_dmc_clk_regs) {
+               pr_warn("%s: Failed to allocate sleep save data\n", __func__);
+               return;
+       }
+
+       register_syscore_ops(&exynos4415_dmc_clk_syscore_ops);
+}
+#else
+static inline void exynos4415_dmc_clk_sleep_init(void) { }
+#endif /* CONFIG_PM_SLEEP */
+
+/* Parent candidate lists for the DMC muxes; index = mux select value. */
+PNAME(mout_mpll_p)             = { "fin_pll", "fout_mpll", };
+PNAME(mout_bpll_p)             = { "fin_pll", "fout_bpll", };
+PNAME(mbpll_p)                 = { "mout_mpll", "mout_bpll", };
+
+/* SRC_DMC muxes: 1-bit selects, sorted by descending shift like the tables above. */
+static struct samsung_mux_clock exynos4415_dmc_mux_clks[] __initdata = {
+       MUX(CLK_DMC_MOUT_MPLL, "mout_mpll", mout_mpll_p, SRC_DMC, 12, 1),
+       MUX(CLK_DMC_MOUT_BPLL, "mout_bpll", mout_bpll_p, SRC_DMC, 10, 1),
+       MUX(CLK_DMC_MOUT_DPHY, "mout_dphy", mbpll_p, SRC_DMC, 8, 1),
+       MUX(CLK_DMC_MOUT_DMC_BUS, "mout_dmc_bus", mbpll_p, SRC_DMC, 4, 1),
+};
+
+/* DIV_DMC1 dividers, sorted by descending bitfield shift (27..8). */
+static struct samsung_div_clock exynos4415_dmc_div_clks[] __initdata = {
+       DIV(CLK_DMC_DIV_DMC, "div_dmc", "div_dmc_pre", DIV_DMC1, 27, 3),
+       DIV(CLK_DMC_DIV_DPHY, "div_dphy", "mout_dphy", DIV_DMC1, 23, 3),
+       DIV(CLK_DMC_DIV_DMC_PRE, "div_dmc_pre", "mout_dmc_bus",
+               DIV_DMC1, 19, 2),
+       DIV(CLK_DMC_DIV_DMCP, "div_dmcp", "div_dmcd", DIV_DMC1, 15, 3),
+       DIV(CLK_DMC_DIV_DMCD, "div_dmcd", "div_dmc", DIV_DMC1, 11, 3),
+       DIV(CLK_DMC_DIV_MPLL_PRE, "div_mpll_pre", "mout_mpll", DIV_DMC1, 8, 2),
+};
+
+/* DMC PLLs (MPLL/BPLL); rate tables are attached in exynos4415_cmu_dmc_init(). */
+static struct samsung_pll_clock exynos4415_dmc_plls[nr_dmc_plls] __initdata = {
+       [mpll] = PLL(pll_35xx, CLK_DMC_FOUT_MPLL, "fout_mpll", "fin_pll",
+               MPLL_LOCK, MPLL_CON0, NULL),
+       [bpll] = PLL(pll_35xx, CLK_DMC_FOUT_BPLL, "fout_bpll", "fin_pll",
+               BPLL_LOCK, BPLL_CON0, NULL),
+};
+
+/*
+ * Probe the DMC CMU: map registers, attach PLL rate tables, register the
+ * DMC PLL/mux/divider tables, hook suspend/resume register save/restore
+ * and publish the block as a DT clock provider.  Runs via CLK_OF_DECLARE,
+ * so any failure is fatal (panic).
+ */
+static void __init exynos4415_cmu_dmc_init(struct device_node *np)
+{
+       void __iomem *reg_base;
+
+       reg_base = of_iomap(np, 0);
+       if (!reg_base)
+               panic("%s: failed to map registers\n", __func__);
+
+       exynos4415_dmc_ctx = samsung_clk_init(np, reg_base, NR_CLKS_DMC);
+       if (!exynos4415_dmc_ctx)
+               panic("%s: unable to allocate context.\n", __func__);
+
+       /* Both DMC PLLs are 35xx-type and reuse the main-CMU rate table. */
+       exynos4415_dmc_plls[mpll].rate_table = exynos4415_pll_rates;
+       exynos4415_dmc_plls[bpll].rate_table = exynos4415_pll_rates;
+
+       samsung_clk_register_pll(exynos4415_dmc_ctx, exynos4415_dmc_plls,
+                               ARRAY_SIZE(exynos4415_dmc_plls), reg_base);
+       samsung_clk_register_mux(exynos4415_dmc_ctx, exynos4415_dmc_mux_clks,
+                               ARRAY_SIZE(exynos4415_dmc_mux_clks));
+       samsung_clk_register_div(exynos4415_dmc_ctx, exynos4415_dmc_div_clks,
+                               ARRAY_SIZE(exynos4415_dmc_div_clks));
+
+       exynos4415_dmc_clk_sleep_init();
+
+       samsung_clk_of_add_provider(np, exynos4415_dmc_ctx);
+}
+CLK_OF_DECLARE(exynos4415_cmu_dmc, "samsung,exynos4415-cmu-dmc",
+               exynos4415_cmu_dmc_init);
index 2527e39aadcfbfafab2a0b5c97d4aab2e4809458..e2e5193d10490b0295805023c448c1c0b685d02d 100644 (file)
 
 #include <linux/clk.h>
 #include <linux/clkdev.h>
-#include <linux/clk-provider.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
-#include <linux/syscore_ops.h>
 
 #include "clk-exynos5260.h"
 #include "clk.h"
 
 #include <dt-bindings/clock/exynos5260-clk.h>
 
-static LIST_HEAD(clock_reg_cache_list);
-
-struct exynos5260_clock_reg_cache {
-       struct list_head node;
-       void __iomem *reg_base;
-       struct samsung_clk_reg_dump *rdump;
-       unsigned int rd_num;
-};
-
-struct exynos5260_cmu_info {
-       /* list of pll clocks and respective count */
-       struct samsung_pll_clock *pll_clks;
-       unsigned int nr_pll_clks;
-       /* list of mux clocks and respective count */
-       struct samsung_mux_clock *mux_clks;
-       unsigned int nr_mux_clks;
-       /* list of div clocks and respective count */
-       struct samsung_div_clock *div_clks;
-       unsigned int nr_div_clks;
-       /* list of gate clocks and respective count */
-       struct samsung_gate_clock *gate_clks;
-       unsigned int nr_gate_clks;
-       /* list of fixed clocks and respective count */
-       struct samsung_fixed_rate_clock *fixed_clks;
-       unsigned int nr_fixed_clks;
-       /* total number of clocks with IDs assigned*/
-       unsigned int nr_clk_ids;
-
-       /* list and number of clocks registers */
-       unsigned long *clk_regs;
-       unsigned int nr_clk_regs;
-};
-
 /*
  * Applicable for all 2550 Type PLLS for Exynos5260, listed below
  * DISP_PLL, EGL_PLL, KFC_PLL, MEM_PLL, BUS_PLL, MEDIA_PLL, G3D_PLL.
@@ -113,104 +78,6 @@ static struct samsung_pll_rate_table pll2650_24mhz_tbl[] __initdata = {
        PLL_36XX_RATE(66000000, 176, 2, 5, 0),
 };
 
-#ifdef CONFIG_PM_SLEEP
-
-static int exynos5260_clk_suspend(void)
-{
-       struct exynos5260_clock_reg_cache *cache;
-
-       list_for_each_entry(cache, &clock_reg_cache_list, node)
-               samsung_clk_save(cache->reg_base, cache->rdump,
-                               cache->rd_num);
-
-       return 0;
-}
-
-static void exynos5260_clk_resume(void)
-{
-       struct exynos5260_clock_reg_cache *cache;
-
-       list_for_each_entry(cache, &clock_reg_cache_list, node)
-               samsung_clk_restore(cache->reg_base, cache->rdump,
-                               cache->rd_num);
-}
-
-static struct syscore_ops exynos5260_clk_syscore_ops = {
-       .suspend = exynos5260_clk_suspend,
-       .resume = exynos5260_clk_resume,
-};
-
-static void exynos5260_clk_sleep_init(void __iomem *reg_base,
-                       unsigned long *rdump,
-                       unsigned long nr_rdump)
-{
-       struct exynos5260_clock_reg_cache *reg_cache;
-
-       reg_cache = kzalloc(sizeof(struct exynos5260_clock_reg_cache),
-                       GFP_KERNEL);
-       if (!reg_cache)
-               panic("could not allocate register cache.\n");
-
-       reg_cache->rdump = samsung_clk_alloc_reg_dump(rdump, nr_rdump);
-
-       if (!reg_cache->rdump)
-               panic("could not allocate register dump storage.\n");
-
-       if (list_empty(&clock_reg_cache_list))
-               register_syscore_ops(&exynos5260_clk_syscore_ops);
-
-       reg_cache->rd_num = nr_rdump;
-       reg_cache->reg_base = reg_base;
-       list_add_tail(&reg_cache->node, &clock_reg_cache_list);
-}
-
-#else
-static void exynos5260_clk_sleep_init(void __iomem *reg_base,
-                       unsigned long *rdump,
-                       unsigned long nr_rdump){}
-#endif
-
-/*
- * Common function which registers plls, muxes, dividers and gates
- * for each CMU. It also add CMU register list to register cache.
- */
-
-void __init exynos5260_cmu_register_one(struct device_node *np,
-                       struct exynos5260_cmu_info *cmu)
-{
-       void __iomem *reg_base;
-       struct samsung_clk_provider *ctx;
-
-       reg_base = of_iomap(np, 0);
-       if (!reg_base)
-               panic("%s: failed to map registers\n", __func__);
-
-       ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
-       if (!ctx)
-               panic("%s: unable to alllocate ctx\n", __func__);
-
-       if (cmu->pll_clks)
-               samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
-                       reg_base);
-       if (cmu->mux_clks)
-               samsung_clk_register_mux(ctx,  cmu->mux_clks,
-                       cmu->nr_mux_clks);
-       if (cmu->div_clks)
-               samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks);
-       if (cmu->gate_clks)
-               samsung_clk_register_gate(ctx, cmu->gate_clks,
-                       cmu->nr_gate_clks);
-       if (cmu->fixed_clks)
-               samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks,
-                       cmu->nr_fixed_clks);
-       if (cmu->clk_regs)
-               exynos5260_clk_sleep_init(reg_base, cmu->clk_regs,
-                       cmu->nr_clk_regs);
-
-       samsung_clk_of_add_provider(np, ctx);
-}
-
-
 /* CMU_AUD */
 
 static unsigned long aud_clk_regs[] __initdata = {
@@ -268,7 +135,7 @@ struct samsung_gate_clock aud_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_aud_init(struct device_node *np)
 {
-       struct exynos5260_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = {0};
 
        cmu.mux_clks = aud_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(aud_mux_clks);
@@ -280,7 +147,7 @@ static void __init exynos5260_clk_aud_init(struct device_node *np)
        cmu.clk_regs = aud_clk_regs;
        cmu.nr_clk_regs = ARRAY_SIZE(aud_clk_regs);
 
-       exynos5260_cmu_register_one(np, &cmu);
+       samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_aud, "samsung,exynos5260-clock-aud",
@@ -458,7 +325,7 @@ struct samsung_gate_clock disp_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_disp_init(struct device_node *np)
 {
-       struct exynos5260_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = {0};
 
        cmu.mux_clks = disp_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(disp_mux_clks);
@@ -470,7 +337,7 @@ static void __init exynos5260_clk_disp_init(struct device_node *np)
        cmu.clk_regs = disp_clk_regs;
        cmu.nr_clk_regs = ARRAY_SIZE(disp_clk_regs);
 
-       exynos5260_cmu_register_one(np, &cmu);
+       samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_disp, "samsung,exynos5260-clock-disp",
@@ -522,7 +389,7 @@ static struct samsung_pll_clock egl_pll_clks[] __initdata = {
 
 static void __init exynos5260_clk_egl_init(struct device_node *np)
 {
-       struct exynos5260_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = {0};
 
        cmu.pll_clks = egl_pll_clks;
        cmu.nr_pll_clks =  ARRAY_SIZE(egl_pll_clks);
@@ -534,7 +401,7 @@ static void __init exynos5260_clk_egl_init(struct device_node *np)
        cmu.clk_regs = egl_clk_regs;
        cmu.nr_clk_regs = ARRAY_SIZE(egl_clk_regs);
 
-       exynos5260_cmu_register_one(np, &cmu);
+       samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_egl, "samsung,exynos5260-clock-egl",
@@ -624,7 +491,7 @@ struct samsung_gate_clock fsys_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_fsys_init(struct device_node *np)
 {
-       struct exynos5260_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = {0};
 
        cmu.mux_clks = fsys_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(fsys_mux_clks);
@@ -634,7 +501,7 @@ static void __init exynos5260_clk_fsys_init(struct device_node *np)
        cmu.clk_regs = fsys_clk_regs;
        cmu.nr_clk_regs = ARRAY_SIZE(fsys_clk_regs);
 
-       exynos5260_cmu_register_one(np, &cmu);
+       samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_fsys, "samsung,exynos5260-clock-fsys",
@@ -713,7 +580,7 @@ struct samsung_gate_clock g2d_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_g2d_init(struct device_node *np)
 {
-       struct exynos5260_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = {0};
 
        cmu.mux_clks = g2d_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(g2d_mux_clks);
@@ -725,7 +592,7 @@ static void __init exynos5260_clk_g2d_init(struct device_node *np)
        cmu.clk_regs = g2d_clk_regs;
        cmu.nr_clk_regs = ARRAY_SIZE(g2d_clk_regs);
 
-       exynos5260_cmu_register_one(np, &cmu);
+       samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_g2d, "samsung,exynos5260-clock-g2d",
@@ -774,7 +641,7 @@ static struct samsung_pll_clock g3d_pll_clks[] __initdata = {
 
 static void __init exynos5260_clk_g3d_init(struct device_node *np)
 {
-       struct exynos5260_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = {0};
 
        cmu.pll_clks = g3d_pll_clks;
        cmu.nr_pll_clks =  ARRAY_SIZE(g3d_pll_clks);
@@ -788,7 +655,7 @@ static void __init exynos5260_clk_g3d_init(struct device_node *np)
        cmu.clk_regs = g3d_clk_regs;
        cmu.nr_clk_regs = ARRAY_SIZE(g3d_clk_regs);
 
-       exynos5260_cmu_register_one(np, &cmu);
+       samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_g3d, "samsung,exynos5260-clock-g3d",
@@ -909,7 +776,7 @@ struct samsung_gate_clock gscl_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_gscl_init(struct device_node *np)
 {
-       struct exynos5260_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = {0};
 
        cmu.mux_clks = gscl_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(gscl_mux_clks);
@@ -921,7 +788,7 @@ static void __init exynos5260_clk_gscl_init(struct device_node *np)
        cmu.clk_regs = gscl_clk_regs;
        cmu.nr_clk_regs = ARRAY_SIZE(gscl_clk_regs);
 
-       exynos5260_cmu_register_one(np, &cmu);
+       samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_gscl, "samsung,exynos5260-clock-gscl",
@@ -1028,7 +895,7 @@ struct samsung_gate_clock isp_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_isp_init(struct device_node *np)
 {
-       struct exynos5260_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = {0};
 
        cmu.mux_clks = isp_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(isp_mux_clks);
@@ -1040,7 +907,7 @@ static void __init exynos5260_clk_isp_init(struct device_node *np)
        cmu.clk_regs = isp_clk_regs;
        cmu.nr_clk_regs = ARRAY_SIZE(isp_clk_regs);
 
-       exynos5260_cmu_register_one(np, &cmu);
+       samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_isp, "samsung,exynos5260-clock-isp",
@@ -1092,7 +959,7 @@ static struct samsung_pll_clock kfc_pll_clks[] __initdata = {
 
 static void __init exynos5260_clk_kfc_init(struct device_node *np)
 {
-       struct exynos5260_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = {0};
 
        cmu.pll_clks = kfc_pll_clks;
        cmu.nr_pll_clks =  ARRAY_SIZE(kfc_pll_clks);
@@ -1104,7 +971,7 @@ static void __init exynos5260_clk_kfc_init(struct device_node *np)
        cmu.clk_regs = kfc_clk_regs;
        cmu.nr_clk_regs = ARRAY_SIZE(kfc_clk_regs);
 
-       exynos5260_cmu_register_one(np, &cmu);
+       samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_kfc, "samsung,exynos5260-clock-kfc",
@@ -1148,7 +1015,7 @@ struct samsung_gate_clock mfc_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_mfc_init(struct device_node *np)
 {
-       struct exynos5260_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = {0};
 
        cmu.mux_clks = mfc_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(mfc_mux_clks);
@@ -1160,7 +1027,7 @@ static void __init exynos5260_clk_mfc_init(struct device_node *np)
        cmu.clk_regs = mfc_clk_regs;
        cmu.nr_clk_regs = ARRAY_SIZE(mfc_clk_regs);
 
-       exynos5260_cmu_register_one(np, &cmu);
+       samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_mfc, "samsung,exynos5260-clock-mfc",
@@ -1295,7 +1162,7 @@ static struct samsung_pll_clock mif_pll_clks[] __initdata = {
 
 static void __init exynos5260_clk_mif_init(struct device_node *np)
 {
-       struct exynos5260_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = {0};
 
        cmu.pll_clks = mif_pll_clks;
        cmu.nr_pll_clks =  ARRAY_SIZE(mif_pll_clks);
@@ -1309,7 +1176,7 @@ static void __init exynos5260_clk_mif_init(struct device_node *np)
        cmu.clk_regs = mif_clk_regs;
        cmu.nr_clk_regs = ARRAY_SIZE(mif_clk_regs);
 
-       exynos5260_cmu_register_one(np, &cmu);
+       samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_mif, "samsung,exynos5260-clock-mif",
@@ -1503,7 +1370,7 @@ struct samsung_gate_clock peri_gate_clks[] __initdata = {
 
 static void __init exynos5260_clk_peri_init(struct device_node *np)
 {
-       struct exynos5260_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = {0};
 
        cmu.mux_clks = peri_mux_clks;
        cmu.nr_mux_clks = ARRAY_SIZE(peri_mux_clks);
@@ -1515,7 +1382,7 @@ static void __init exynos5260_clk_peri_init(struct device_node *np)
        cmu.clk_regs = peri_clk_regs;
        cmu.nr_clk_regs = ARRAY_SIZE(peri_clk_regs);
 
-       exynos5260_cmu_register_one(np, &cmu);
+       samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_peri, "samsung,exynos5260-clock-peri",
@@ -1959,7 +1826,7 @@ static struct samsung_pll_clock top_pll_clks[] __initdata = {
 
 static void __init exynos5260_clk_top_init(struct device_node *np)
 {
-       struct exynos5260_cmu_info cmu = {0};
+       struct samsung_cmu_info cmu = {0};
 
        cmu.pll_clks = top_pll_clks;
        cmu.nr_pll_clks =  ARRAY_SIZE(top_pll_clks);
@@ -1975,7 +1842,7 @@ static void __init exynos5260_clk_top_init(struct device_node *np)
        cmu.clk_regs = top_clk_regs;
        cmu.nr_clk_regs = ARRAY_SIZE(top_clk_regs);
 
-       exynos5260_cmu_register_one(np, &cmu);
+       samsung_cmu_register_one(np, &cmu);
 }
 
 CLK_OF_DECLARE(exynos5260_clk_top, "samsung,exynos5260-clock-top",
diff --git a/drivers/clk/samsung/clk-exynos7.c b/drivers/clk/samsung/clk-exynos7.c
new file mode 100644 (file)
index 0000000..ea4483b
--- /dev/null
@@ -0,0 +1,743 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Naveen Krishna Ch <naveenkrishna.ch@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+
+#include "clk.h"
+#include <dt-bindings/clock/exynos7-clk.h>
+
+/* Register Offset definitions for CMU_TOPC (0x10570000) */
+#define CC_PLL_LOCK            0x0000
+#define BUS0_PLL_LOCK          0x0004
+#define BUS1_DPLL_LOCK         0x0008
+#define MFC_PLL_LOCK           0x000C
+#define AUD_PLL_LOCK           0x0010
+#define CC_PLL_CON0            0x0100
+#define BUS0_PLL_CON0          0x0110
+#define BUS1_DPLL_CON0         0x0120
+#define MFC_PLL_CON0           0x0130
+#define AUD_PLL_CON0           0x0140
+#define MUX_SEL_TOPC0          0x0200
+#define MUX_SEL_TOPC1          0x0204
+#define MUX_SEL_TOPC2          0x0208
+#define MUX_SEL_TOPC3          0x020C
+#define DIV_TOPC0              0x0600
+#define DIV_TOPC1              0x0604
+#define DIV_TOPC3              0x060C
+
+static struct samsung_fixed_factor_clock topc_fixed_factor_clks[] __initdata = {
+       FFACTOR(0, "ffac_topc_bus0_pll_div2", "mout_bus0_pll_ctrl", 1, 2, 0),
+       FFACTOR(0, "ffac_topc_bus0_pll_div4",
+               "ffac_topc_bus0_pll_div2", 1, 2, 0),
+       FFACTOR(0, "ffac_topc_bus1_pll_div2", "mout_bus1_pll_ctrl", 1, 2, 0),
+       FFACTOR(0, "ffac_topc_cc_pll_div2", "mout_cc_pll_ctrl", 1, 2, 0),
+       FFACTOR(0, "ffac_topc_mfc_pll_div2", "mout_mfc_pll_ctrl", 1, 2, 0),
+};
+
+/* List of parent clocks for Muxes in CMU_TOPC */
+PNAME(mout_bus0_pll_ctrl_p)    = { "fin_pll", "fout_bus0_pll" };
+PNAME(mout_bus1_pll_ctrl_p)    = { "fin_pll", "fout_bus1_pll" };
+PNAME(mout_cc_pll_ctrl_p)      = { "fin_pll", "fout_cc_pll" };
+PNAME(mout_mfc_pll_ctrl_p)     = { "fin_pll", "fout_mfc_pll" };
+
+PNAME(mout_topc_group2) = { "mout_sclk_bus0_pll_cmuc",
+       "mout_sclk_bus1_pll_cmuc", "mout_sclk_cc_pll_cmuc",
+       "mout_sclk_mfc_pll_cmuc" };
+
+PNAME(mout_sclk_bus0_pll_cmuc_p) = { "mout_bus0_pll_ctrl",
+       "ffac_topc_bus0_pll_div2", "ffac_topc_bus0_pll_div4"};
+PNAME(mout_sclk_bus1_pll_cmuc_p) = { "mout_bus1_pll_ctrl",
+       "ffac_topc_bus1_pll_div2"};
+PNAME(mout_sclk_cc_pll_cmuc_p) = { "mout_cc_pll_ctrl",
+       "ffac_topc_cc_pll_div2"};
+PNAME(mout_sclk_mfc_pll_cmuc_p) = { "mout_mfc_pll_ctrl",
+       "ffac_topc_mfc_pll_div2"};
+
+
+PNAME(mout_sclk_bus0_pll_out_p) = {"mout_bus0_pll_ctrl",
+       "ffac_topc_bus0_pll_div2"};
+
+static unsigned long topc_clk_regs[] __initdata = {
+       CC_PLL_LOCK,
+       BUS0_PLL_LOCK,
+       BUS1_DPLL_LOCK,
+       MFC_PLL_LOCK,
+       AUD_PLL_LOCK,
+       CC_PLL_CON0,
+       BUS0_PLL_CON0,
+       BUS1_DPLL_CON0,
+       MFC_PLL_CON0,
+       AUD_PLL_CON0,
+       MUX_SEL_TOPC0,
+       MUX_SEL_TOPC1,
+       MUX_SEL_TOPC2,
+       MUX_SEL_TOPC3,
+       DIV_TOPC0,
+       DIV_TOPC1,
+       DIV_TOPC3,
+};
+
+static struct samsung_mux_clock topc_mux_clks[] __initdata = {
+       MUX(0, "mout_bus0_pll_ctrl", mout_bus0_pll_ctrl_p, MUX_SEL_TOPC0, 0, 1),
+       MUX(0, "mout_bus1_pll_ctrl", mout_bus1_pll_ctrl_p, MUX_SEL_TOPC0, 4, 1),
+       MUX(0, "mout_cc_pll_ctrl", mout_cc_pll_ctrl_p, MUX_SEL_TOPC0, 8, 1),
+       MUX(0, "mout_mfc_pll_ctrl", mout_mfc_pll_ctrl_p, MUX_SEL_TOPC0, 12, 1),
+
+       MUX(0, "mout_sclk_bus0_pll_cmuc", mout_sclk_bus0_pll_cmuc_p,
+               MUX_SEL_TOPC0, 16, 2),
+       MUX(0, "mout_sclk_bus1_pll_cmuc", mout_sclk_bus1_pll_cmuc_p,
+               MUX_SEL_TOPC0, 20, 1),
+       MUX(0, "mout_sclk_cc_pll_cmuc", mout_sclk_cc_pll_cmuc_p,
+               MUX_SEL_TOPC0, 24, 1),
+       MUX(0, "mout_sclk_mfc_pll_cmuc", mout_sclk_mfc_pll_cmuc_p,
+               MUX_SEL_TOPC0, 28, 1),
+
+       MUX(0, "mout_sclk_bus0_pll_out", mout_sclk_bus0_pll_out_p,
+               MUX_SEL_TOPC1, 16, 1),
+
+       MUX(0, "mout_aclk_ccore_133", mout_topc_group2, MUX_SEL_TOPC2, 4, 2),
+
+       MUX(0, "mout_aclk_peris_66", mout_topc_group2, MUX_SEL_TOPC3, 24, 2),
+};
+
+static struct samsung_div_clock topc_div_clks[] __initdata = {
+       DIV(DOUT_ACLK_CCORE_133, "dout_aclk_ccore_133", "mout_aclk_ccore_133",
+               DIV_TOPC0, 4, 4),
+
+       DIV(DOUT_ACLK_PERIS, "dout_aclk_peris_66", "mout_aclk_peris_66",
+               DIV_TOPC1, 24, 4),
+
+       DIV(DOUT_SCLK_BUS0_PLL, "dout_sclk_bus0_pll", "mout_sclk_bus0_pll_out",
+               DIV_TOPC3, 0, 3),
+       DIV(DOUT_SCLK_BUS1_PLL, "dout_sclk_bus1_pll", "mout_bus1_pll_ctrl",
+               DIV_TOPC3, 8, 3),
+       DIV(DOUT_SCLK_CC_PLL, "dout_sclk_cc_pll", "mout_cc_pll_ctrl",
+               DIV_TOPC3, 12, 3),
+       DIV(DOUT_SCLK_MFC_PLL, "dout_sclk_mfc_pll", "mout_mfc_pll_ctrl",
+               DIV_TOPC3, 16, 3),
+};
+
+static struct samsung_pll_clock topc_pll_clks[] __initdata = {
+       PLL(pll_1451x, 0, "fout_bus0_pll", "fin_pll", BUS0_PLL_LOCK,
+               BUS0_PLL_CON0, NULL),
+       PLL(pll_1452x, 0, "fout_cc_pll", "fin_pll", CC_PLL_LOCK,
+               CC_PLL_CON0, NULL),
+       PLL(pll_1452x, 0, "fout_bus1_pll", "fin_pll", BUS1_DPLL_LOCK,
+               BUS1_DPLL_CON0, NULL),
+       PLL(pll_1452x, 0, "fout_mfc_pll", "fin_pll", MFC_PLL_LOCK,
+               MFC_PLL_CON0, NULL),
+       PLL(pll_1460x, 0, "fout_aud_pll", "fin_pll", AUD_PLL_LOCK,
+               AUD_PLL_CON0, NULL),
+};
+
+static struct samsung_cmu_info topc_cmu_info __initdata = {
+       .pll_clks               = topc_pll_clks,
+       .nr_pll_clks            = ARRAY_SIZE(topc_pll_clks),
+       .mux_clks               = topc_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(topc_mux_clks),
+       .div_clks               = topc_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(topc_div_clks),
+       .fixed_factor_clks      = topc_fixed_factor_clks,
+       .nr_fixed_factor_clks   = ARRAY_SIZE(topc_fixed_factor_clks),
+       .nr_clk_ids             = TOPC_NR_CLK,
+       .clk_regs               = topc_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(topc_clk_regs),
+};
+
+static void __init exynos7_clk_topc_init(struct device_node *np)
+{
+       samsung_cmu_register_one(np, &topc_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_topc, "samsung,exynos7-clock-topc",
+       exynos7_clk_topc_init);
+
+/* Register Offset definitions for CMU_TOP0 (0x105D0000) */
+#define MUX_SEL_TOP00                  0x0200
+#define MUX_SEL_TOP01                  0x0204
+#define MUX_SEL_TOP03                  0x020C
+#define MUX_SEL_TOP0_PERIC3            0x023C
+#define DIV_TOP03                      0x060C
+#define DIV_TOP0_PERIC3                        0x063C
+#define ENABLE_SCLK_TOP0_PERIC3                0x0A3C
+
+/* List of parent clocks for Muxes in CMU_TOP0 */
+PNAME(mout_bus0_pll_p) = { "fin_pll", "dout_sclk_bus0_pll" };
+PNAME(mout_bus1_pll_p) = { "fin_pll", "dout_sclk_bus1_pll" };
+PNAME(mout_cc_pll_p)   = { "fin_pll", "dout_sclk_cc_pll" };
+PNAME(mout_mfc_pll_p)  = { "fin_pll", "dout_sclk_mfc_pll" };
+
+PNAME(mout_top0_half_bus0_pll_p) = {"mout_top0_bus0_pll",
+       "ffac_top0_bus0_pll_div2"};
+PNAME(mout_top0_half_bus1_pll_p) = {"mout_top0_bus1_pll",
+       "ffac_top0_bus1_pll_div2"};
+PNAME(mout_top0_half_cc_pll_p) = {"mout_top0_cc_pll",
+       "ffac_top0_cc_pll_div2"};
+PNAME(mout_top0_half_mfc_pll_p) = {"mout_top0_mfc_pll",
+       "ffac_top0_mfc_pll_div2"};
+
+PNAME(mout_top0_group1) = {"mout_top0_half_bus0_pll",
+       "mout_top0_half_bus1_pll", "mout_top0_half_cc_pll",
+       "mout_top0_half_mfc_pll"};
+
+static unsigned long top0_clk_regs[] __initdata = {
+       MUX_SEL_TOP00,
+       MUX_SEL_TOP01,
+       MUX_SEL_TOP03,
+       MUX_SEL_TOP0_PERIC3,
+       DIV_TOP03,
+       DIV_TOP0_PERIC3,
+       ENABLE_SCLK_TOP0_PERIC3,
+};
+
+static struct samsung_mux_clock top0_mux_clks[] __initdata = {
+       MUX(0, "mout_top0_mfc_pll", mout_mfc_pll_p, MUX_SEL_TOP00, 4, 1),
+       MUX(0, "mout_top0_cc_pll", mout_cc_pll_p, MUX_SEL_TOP00, 8, 1),
+       MUX(0, "mout_top0_bus1_pll", mout_bus1_pll_p, MUX_SEL_TOP00, 12, 1),
+       MUX(0, "mout_top0_bus0_pll", mout_bus0_pll_p, MUX_SEL_TOP00, 16, 1),
+
+       MUX(0, "mout_top0_half_mfc_pll", mout_top0_half_mfc_pll_p,
+               MUX_SEL_TOP01, 4, 1),
+       MUX(0, "mout_top0_half_cc_pll", mout_top0_half_cc_pll_p,
+               MUX_SEL_TOP01, 8, 1),
+       MUX(0, "mout_top0_half_bus1_pll", mout_top0_half_bus1_pll_p,
+               MUX_SEL_TOP01, 12, 1),
+       MUX(0, "mout_top0_half_bus0_pll", mout_top0_half_bus0_pll_p,
+               MUX_SEL_TOP01, 16, 1),
+
+       MUX(0, "mout_aclk_peric1_66", mout_top0_group1, MUX_SEL_TOP03, 12, 2),
+       MUX(0, "mout_aclk_peric0_66", mout_top0_group1, MUX_SEL_TOP03, 20, 2),
+
+       MUX(0, "mout_sclk_uart3", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 4, 2),
+       MUX(0, "mout_sclk_uart2", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 8, 2),
+       MUX(0, "mout_sclk_uart1", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 12, 2),
+       MUX(0, "mout_sclk_uart0", mout_top0_group1, MUX_SEL_TOP0_PERIC3, 16, 2),
+};
+
+static struct samsung_div_clock top0_div_clks[] __initdata = {
+       DIV(DOUT_ACLK_PERIC1, "dout_aclk_peric1_66", "mout_aclk_peric1_66",
+               DIV_TOP03, 12, 6),
+       DIV(DOUT_ACLK_PERIC0, "dout_aclk_peric0_66", "mout_aclk_peric0_66",
+               DIV_TOP03, 20, 6),
+
+       DIV(0, "dout_sclk_uart3", "mout_sclk_uart3", DIV_TOP0_PERIC3, 4, 4),
+       DIV(0, "dout_sclk_uart2", "mout_sclk_uart2", DIV_TOP0_PERIC3, 8, 4),
+       DIV(0, "dout_sclk_uart1", "mout_sclk_uart1", DIV_TOP0_PERIC3, 12, 4),
+       DIV(0, "dout_sclk_uart0", "mout_sclk_uart0", DIV_TOP0_PERIC3, 16, 4),
+};
+
+static struct samsung_gate_clock top0_gate_clks[] __initdata = {
+       GATE(CLK_SCLK_UART3, "sclk_uart3", "dout_sclk_uart3",
+               ENABLE_SCLK_TOP0_PERIC3, 4, 0, 0),
+       GATE(CLK_SCLK_UART2, "sclk_uart2", "dout_sclk_uart2",
+               ENABLE_SCLK_TOP0_PERIC3, 8, 0, 0),
+       GATE(CLK_SCLK_UART1, "sclk_uart1", "dout_sclk_uart1",
+               ENABLE_SCLK_TOP0_PERIC3, 12, 0, 0),
+       GATE(CLK_SCLK_UART0, "sclk_uart0", "dout_sclk_uart0",
+               ENABLE_SCLK_TOP0_PERIC3, 16, 0, 0),
+};
+
+static struct samsung_fixed_factor_clock top0_fixed_factor_clks[] __initdata = {
+       FFACTOR(0, "ffac_top0_bus0_pll_div2", "mout_top0_bus0_pll", 1, 2, 0),
+       FFACTOR(0, "ffac_top0_bus1_pll_div2", "mout_top0_bus1_pll", 1, 2, 0),
+       FFACTOR(0, "ffac_top0_cc_pll_div2", "mout_top0_cc_pll", 1, 2, 0),
+       FFACTOR(0, "ffac_top0_mfc_pll_div2", "mout_top0_mfc_pll", 1, 2, 0),
+};
+
+static struct samsung_cmu_info top0_cmu_info __initdata = {
+       .mux_clks               = top0_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(top0_mux_clks),
+       .div_clks               = top0_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(top0_div_clks),
+       .gate_clks              = top0_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(top0_gate_clks),
+       .fixed_factor_clks      = top0_fixed_factor_clks,
+       .nr_fixed_factor_clks   = ARRAY_SIZE(top0_fixed_factor_clks),
+       .nr_clk_ids             = TOP0_NR_CLK,
+       .clk_regs               = top0_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(top0_clk_regs),
+};
+
+static void __init exynos7_clk_top0_init(struct device_node *np)
+{
+       samsung_cmu_register_one(np, &top0_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_top0, "samsung,exynos7-clock-top0",
+       exynos7_clk_top0_init);
+
+/* Register Offset definitions for CMU_TOP1 (0x105E0000) */
+#define MUX_SEL_TOP10                  0x0200
+#define MUX_SEL_TOP11                  0x0204
+#define MUX_SEL_TOP13                  0x020C
+#define MUX_SEL_TOP1_FSYS0             0x0224
+#define MUX_SEL_TOP1_FSYS1             0x0228
+#define DIV_TOP13                      0x060C
+#define DIV_TOP1_FSYS0                 0x0624
+#define DIV_TOP1_FSYS1                 0x0628
+#define ENABLE_ACLK_TOP13              0x080C
+#define ENABLE_SCLK_TOP1_FSYS0         0x0A24
+#define ENABLE_SCLK_TOP1_FSYS1         0x0A28
+
+/* List of parent clocks for Muxes in CMU_TOP1 */
+PNAME(mout_top1_bus0_pll_p)    = { "fin_pll", "dout_sclk_bus0_pll" };
+PNAME(mout_top1_bus1_pll_p)    = { "fin_pll", "dout_sclk_bus1_pll_b" };
+PNAME(mout_top1_cc_pll_p)      = { "fin_pll", "dout_sclk_cc_pll_b" };
+PNAME(mout_top1_mfc_pll_p)     = { "fin_pll", "dout_sclk_mfc_pll_b" };
+
+PNAME(mout_top1_half_bus0_pll_p) = {"mout_top1_bus0_pll",
+       "ffac_top1_bus0_pll_div2"};
+PNAME(mout_top1_half_bus1_pll_p) = {"mout_top1_bus1_pll",
+       "ffac_top1_bus1_pll_div2"};
+PNAME(mout_top1_half_cc_pll_p) = {"mout_top1_cc_pll",
+       "ffac_top1_cc_pll_div2"};
+PNAME(mout_top1_half_mfc_pll_p) = {"mout_top1_mfc_pll",
+       "ffac_top1_mfc_pll_div2"};
+
+PNAME(mout_top1_group1) = {"mout_top1_half_bus0_pll",
+       "mout_top1_half_bus1_pll", "mout_top1_half_cc_pll",
+       "mout_top1_half_mfc_pll"};
+
+static unsigned long top1_clk_regs[] __initdata = {
+       MUX_SEL_TOP10,
+       MUX_SEL_TOP11,
+       MUX_SEL_TOP13,
+       MUX_SEL_TOP1_FSYS0,
+       MUX_SEL_TOP1_FSYS1,
+       DIV_TOP13,
+       DIV_TOP1_FSYS0,
+       DIV_TOP1_FSYS1,
+       ENABLE_ACLK_TOP13,
+       ENABLE_SCLK_TOP1_FSYS0,
+       ENABLE_SCLK_TOP1_FSYS1,
+};
+
+static struct samsung_mux_clock top1_mux_clks[] __initdata = {
+       MUX(0, "mout_top1_mfc_pll", mout_top1_mfc_pll_p, MUX_SEL_TOP10, 4, 1),
+       MUX(0, "mout_top1_cc_pll", mout_top1_cc_pll_p, MUX_SEL_TOP10, 8, 1),
+       MUX(0, "mout_top1_bus1_pll", mout_top1_bus1_pll_p,
+               MUX_SEL_TOP10, 12, 1),
+       MUX(0, "mout_top1_bus0_pll", mout_top1_bus0_pll_p,
+               MUX_SEL_TOP10, 16, 1),
+
+       MUX(0, "mout_top1_half_mfc_pll", mout_top1_half_mfc_pll_p,
+               MUX_SEL_TOP11, 4, 1),
+       MUX(0, "mout_top1_half_cc_pll", mout_top1_half_cc_pll_p,
+               MUX_SEL_TOP11, 8, 1),
+       MUX(0, "mout_top1_half_bus1_pll", mout_top1_half_bus1_pll_p,
+               MUX_SEL_TOP11, 12, 1),
+       MUX(0, "mout_top1_half_bus0_pll", mout_top1_half_bus0_pll_p,
+               MUX_SEL_TOP11, 16, 1),
+
+       MUX(0, "mout_aclk_fsys1_200", mout_top1_group1, MUX_SEL_TOP13, 24, 2),
+       MUX(0, "mout_aclk_fsys0_200", mout_top1_group1, MUX_SEL_TOP13, 28, 2),
+
+       MUX(0, "mout_sclk_mmc2", mout_top1_group1, MUX_SEL_TOP1_FSYS0, 24, 2),
+
+       MUX(0, "mout_sclk_mmc1", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 24, 2),
+       MUX(0, "mout_sclk_mmc0", mout_top1_group1, MUX_SEL_TOP1_FSYS1, 28, 2),
+};
+
+static struct samsung_div_clock top1_div_clks[] __initdata = {
+       DIV(DOUT_ACLK_FSYS1_200, "dout_aclk_fsys1_200", "mout_aclk_fsys1_200",
+               DIV_TOP13, 24, 4),
+       DIV(DOUT_ACLK_FSYS0_200, "dout_aclk_fsys0_200", "mout_aclk_fsys0_200",
+               DIV_TOP13, 28, 4),
+
+       DIV(DOUT_SCLK_MMC2, "dout_sclk_mmc2", "mout_sclk_mmc2",
+               DIV_TOP1_FSYS0, 24, 4),
+
+       DIV(DOUT_SCLK_MMC1, "dout_sclk_mmc1", "mout_sclk_mmc1",
+               DIV_TOP1_FSYS1, 24, 4),
+       DIV(DOUT_SCLK_MMC0, "dout_sclk_mmc0", "mout_sclk_mmc0",
+               DIV_TOP1_FSYS1, 28, 4),
+};
+
+static struct samsung_gate_clock top1_gate_clks[] __initdata = {
+       GATE(CLK_SCLK_MMC2, "sclk_mmc2", "dout_sclk_mmc2",
+               ENABLE_SCLK_TOP1_FSYS0, 24, CLK_SET_RATE_PARENT, 0),
+
+       GATE(CLK_SCLK_MMC1, "sclk_mmc1", "dout_sclk_mmc1",
+               ENABLE_SCLK_TOP1_FSYS1, 24, CLK_SET_RATE_PARENT, 0),
+       GATE(CLK_SCLK_MMC0, "sclk_mmc0", "dout_sclk_mmc0",
+               ENABLE_SCLK_TOP1_FSYS1, 28, CLK_SET_RATE_PARENT, 0),
+};
+
+static struct samsung_fixed_factor_clock top1_fixed_factor_clks[] __initdata = {
+       FFACTOR(0, "ffac_top1_bus0_pll_div2", "mout_top1_bus0_pll", 1, 2, 0),
+       FFACTOR(0, "ffac_top1_bus1_pll_div2", "mout_top1_bus1_pll", 1, 2, 0),
+       FFACTOR(0, "ffac_top1_cc_pll_div2", "mout_top1_cc_pll", 1, 2, 0),
+       FFACTOR(0, "ffac_top1_mfc_pll_div2", "mout_top1_mfc_pll", 1, 2, 0),
+};
+
+static struct samsung_cmu_info top1_cmu_info __initdata = {
+       .mux_clks               = top1_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(top1_mux_clks),
+       .div_clks               = top1_div_clks,
+       .nr_div_clks            = ARRAY_SIZE(top1_div_clks),
+       .gate_clks              = top1_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(top1_gate_clks),
+       .fixed_factor_clks      = top1_fixed_factor_clks,
+       .nr_fixed_factor_clks   = ARRAY_SIZE(top1_fixed_factor_clks),
+       .nr_clk_ids             = TOP1_NR_CLK,
+       .clk_regs               = top1_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(top1_clk_regs),
+};
+
+static void __init exynos7_clk_top1_init(struct device_node *np)
+{
+       samsung_cmu_register_one(np, &top1_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_top1, "samsung,exynos7-clock-top1",
+       exynos7_clk_top1_init);
+
+/* Register Offset definitions for CMU_CCORE (0x105B0000) */
+#define MUX_SEL_CCORE                  0x0200
+#define DIV_CCORE                      0x0600
+#define ENABLE_ACLK_CCORE0             0x0800
+#define ENABLE_ACLK_CCORE1             0x0804
+#define ENABLE_PCLK_CCORE              0x0900
+
+/*
+ * List of parent clocks for Muxes in CMU_CCORE
+ */
+PNAME(mout_aclk_ccore_133_p)   = { "fin_pll", "dout_aclk_ccore_133" };
+
+static unsigned long ccore_clk_regs[] __initdata = {
+       MUX_SEL_CCORE,
+       ENABLE_PCLK_CCORE,
+};
+
+static struct samsung_mux_clock ccore_mux_clks[] __initdata = {
+       MUX(0, "mout_aclk_ccore_133_user", mout_aclk_ccore_133_p,
+               MUX_SEL_CCORE, 1, 1),
+};
+
+static struct samsung_gate_clock ccore_gate_clks[] __initdata = {
+       GATE(PCLK_RTC, "pclk_rtc", "mout_aclk_ccore_133_user",
+               ENABLE_PCLK_CCORE, 8, 0, 0),
+};
+
+static struct samsung_cmu_info ccore_cmu_info __initdata = {
+       .mux_clks               = ccore_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(ccore_mux_clks),
+       .gate_clks              = ccore_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(ccore_gate_clks),
+       .nr_clk_ids             = CCORE_NR_CLK,
+       .clk_regs               = ccore_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(ccore_clk_regs),
+};
+
+static void __init exynos7_clk_ccore_init(struct device_node *np)
+{
+       samsung_cmu_register_one(np, &ccore_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_ccore, "samsung,exynos7-clock-ccore",
+       exynos7_clk_ccore_init);
+
+/* Register Offset definitions for CMU_PERIC0 (0x13610000) */
+#define MUX_SEL_PERIC0                 0x0200
+#define ENABLE_PCLK_PERIC0             0x0900
+#define ENABLE_SCLK_PERIC0             0x0A00
+
+/* List of parent clocks for Muxes in CMU_PERIC0 */
+PNAME(mout_aclk_peric0_66_p)   = { "fin_pll", "dout_aclk_peric0_66" };
+PNAME(mout_sclk_uart0_p)       = { "fin_pll", "sclk_uart0" };
+
+static unsigned long peric0_clk_regs[] __initdata = {
+       MUX_SEL_PERIC0,
+       ENABLE_PCLK_PERIC0,
+       ENABLE_SCLK_PERIC0,
+};
+
+static struct samsung_mux_clock peric0_mux_clks[] __initdata = {
+       MUX(0, "mout_aclk_peric0_66_user", mout_aclk_peric0_66_p,
+               MUX_SEL_PERIC0, 0, 1),
+       MUX(0, "mout_sclk_uart0_user", mout_sclk_uart0_p,
+               MUX_SEL_PERIC0, 16, 1),
+};
+
+static struct samsung_gate_clock peric0_gate_clks[] __initdata = {
+       GATE(PCLK_HSI2C0, "pclk_hsi2c0", "mout_aclk_peric0_66_user",
+               ENABLE_PCLK_PERIC0, 8, 0, 0),
+       GATE(PCLK_HSI2C1, "pclk_hsi2c1", "mout_aclk_peric0_66_user",
+               ENABLE_PCLK_PERIC0, 9, 0, 0),
+       GATE(PCLK_HSI2C4, "pclk_hsi2c4", "mout_aclk_peric0_66_user",
+               ENABLE_PCLK_PERIC0, 10, 0, 0),
+       GATE(PCLK_HSI2C5, "pclk_hsi2c5", "mout_aclk_peric0_66_user",
+               ENABLE_PCLK_PERIC0, 11, 0, 0),
+       GATE(PCLK_HSI2C9, "pclk_hsi2c9", "mout_aclk_peric0_66_user",
+               ENABLE_PCLK_PERIC0, 12, 0, 0),
+       GATE(PCLK_HSI2C10, "pclk_hsi2c10", "mout_aclk_peric0_66_user",
+               ENABLE_PCLK_PERIC0, 13, 0, 0),
+       GATE(PCLK_HSI2C11, "pclk_hsi2c11", "mout_aclk_peric0_66_user",
+               ENABLE_PCLK_PERIC0, 14, 0, 0),
+       GATE(PCLK_UART0, "pclk_uart0", "mout_aclk_peric0_66_user",
+               ENABLE_PCLK_PERIC0, 16, 0, 0),
+       GATE(PCLK_ADCIF, "pclk_adcif", "mout_aclk_peric0_66_user",
+               ENABLE_PCLK_PERIC0, 20, 0, 0),
+       GATE(PCLK_PWM, "pclk_pwm", "mout_aclk_peric0_66_user",
+               ENABLE_PCLK_PERIC0, 21, 0, 0),
+
+       GATE(SCLK_UART0, "sclk_uart0_user", "mout_sclk_uart0_user",
+               ENABLE_SCLK_PERIC0, 16, 0, 0),
+       GATE(SCLK_PWM, "sclk_pwm", "fin_pll", ENABLE_SCLK_PERIC0, 21, 0, 0),
+};
+
+static struct samsung_cmu_info peric0_cmu_info __initdata = {
+       .mux_clks               = peric0_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(peric0_mux_clks),
+       .gate_clks              = peric0_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(peric0_gate_clks),
+       .nr_clk_ids             = PERIC0_NR_CLK,
+       .clk_regs               = peric0_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(peric0_clk_regs),
+};
+
+static void __init exynos7_clk_peric0_init(struct device_node *np)
+{
+       samsung_cmu_register_one(np, &peric0_cmu_info);
+}
+
+/* Register Offset definitions for CMU_PERIC1 (0x14C80000) */
+#define MUX_SEL_PERIC10                        0x0200
+#define MUX_SEL_PERIC11                        0x0204
+#define ENABLE_PCLK_PERIC1             0x0900
+#define ENABLE_SCLK_PERIC10            0x0A00
+
+CLK_OF_DECLARE(exynos7_clk_peric0, "samsung,exynos7-clock-peric0",
+       exynos7_clk_peric0_init);
+
+/* List of parent clocks for Muxes in CMU_PERIC1 */
+PNAME(mout_aclk_peric1_66_p)   = { "fin_pll", "dout_aclk_peric1_66" };
+PNAME(mout_sclk_uart1_p)       = { "fin_pll", "sclk_uart1" };
+PNAME(mout_sclk_uart2_p)       = { "fin_pll", "sclk_uart2" };
+PNAME(mout_sclk_uart3_p)       = { "fin_pll", "sclk_uart3" };
+
+static unsigned long peric1_clk_regs[] __initdata = {
+       MUX_SEL_PERIC10,
+       MUX_SEL_PERIC11,
+       ENABLE_PCLK_PERIC1,
+       ENABLE_SCLK_PERIC10,
+};
+
+static struct samsung_mux_clock peric1_mux_clks[] __initdata = {
+       MUX(0, "mout_aclk_peric1_66_user", mout_aclk_peric1_66_p,
+               MUX_SEL_PERIC10, 0, 1),
+
+       MUX(0, "mout_sclk_uart1_user", mout_sclk_uart1_p,
+               MUX_SEL_PERIC11, 20, 1),
+       MUX(0, "mout_sclk_uart2_user", mout_sclk_uart2_p,
+               MUX_SEL_PERIC11, 24, 1),
+       MUX(0, "mout_sclk_uart3_user", mout_sclk_uart3_p,
+               MUX_SEL_PERIC11, 28, 1),
+};
+
+static struct samsung_gate_clock peric1_gate_clks[] __initdata = {
+       GATE(PCLK_HSI2C2, "pclk_hsi2c2", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 4, 0, 0),
+       GATE(PCLK_HSI2C3, "pclk_hsi2c3", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 5, 0, 0),
+       GATE(PCLK_HSI2C6, "pclk_hsi2c6", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 6, 0, 0),
+       GATE(PCLK_HSI2C7, "pclk_hsi2c7", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 7, 0, 0),
+       GATE(PCLK_HSI2C8, "pclk_hsi2c8", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 8, 0, 0),
+       GATE(PCLK_UART1, "pclk_uart1", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 9, 0, 0),
+       GATE(PCLK_UART2, "pclk_uart2", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 10, 0, 0),
+       GATE(PCLK_UART3, "pclk_uart3", "mout_aclk_peric1_66_user",
+               ENABLE_PCLK_PERIC1, 11, 0, 0),
+
+       GATE(SCLK_UART1, "sclk_uart1_user", "mout_sclk_uart1_user",
+               ENABLE_SCLK_PERIC10, 9, 0, 0),
+       GATE(SCLK_UART2, "sclk_uart2_user", "mout_sclk_uart2_user",
+               ENABLE_SCLK_PERIC10, 10, 0, 0),
+       GATE(SCLK_UART3, "sclk_uart3_user", "mout_sclk_uart3_user",
+               ENABLE_SCLK_PERIC10, 11, 0, 0),
+};
+
+static struct samsung_cmu_info peric1_cmu_info __initdata = {
+       .mux_clks               = peric1_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(peric1_mux_clks),
+       .gate_clks              = peric1_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(peric1_gate_clks),
+       .nr_clk_ids             = PERIC1_NR_CLK,
+       .clk_regs               = peric1_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(peric1_clk_regs),
+};
+
+static void __init exynos7_clk_peric1_init(struct device_node *np)
+{
+       samsung_cmu_register_one(np, &peric1_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_peric1, "samsung,exynos7-clock-peric1",
+       exynos7_clk_peric1_init);
+
+/* Register Offset definitions for CMU_PERIS (0x10040000) */
+#define MUX_SEL_PERIS                  0x0200
+#define ENABLE_PCLK_PERIS              0x0900
+#define ENABLE_PCLK_PERIS_SECURE_CHIPID        0x0910
+#define ENABLE_SCLK_PERIS              0x0A00
+#define ENABLE_SCLK_PERIS_SECURE_CHIPID        0x0A10
+
+/* List of parent clocks for Muxes in CMU_PERIS */
+PNAME(mout_aclk_peris_66_p) = { "fin_pll", "dout_aclk_peris_66" };
+
+static unsigned long peris_clk_regs[] __initdata = {
+       MUX_SEL_PERIS,
+       ENABLE_PCLK_PERIS,
+       ENABLE_PCLK_PERIS_SECURE_CHIPID,
+       ENABLE_SCLK_PERIS,
+       ENABLE_SCLK_PERIS_SECURE_CHIPID,
+};
+
+static struct samsung_mux_clock peris_mux_clks[] __initdata = {
+       MUX(0, "mout_aclk_peris_66_user",
+               mout_aclk_peris_66_p, MUX_SEL_PERIS, 0, 1),
+};
+
+static struct samsung_gate_clock peris_gate_clks[] __initdata = {
+       GATE(PCLK_WDT, "pclk_wdt", "mout_aclk_peris_66_user",
+               ENABLE_PCLK_PERIS, 6, 0, 0),
+       GATE(PCLK_TMU, "pclk_tmu_apbif", "mout_aclk_peris_66_user",
+               ENABLE_PCLK_PERIS, 10, 0, 0),
+
+       GATE(PCLK_CHIPID, "pclk_chipid", "mout_aclk_peris_66_user",
+               ENABLE_PCLK_PERIS_SECURE_CHIPID, 0, 0, 0),
+       GATE(SCLK_CHIPID, "sclk_chipid", "fin_pll",
+               ENABLE_SCLK_PERIS_SECURE_CHIPID, 0, 0, 0),
+
+       GATE(SCLK_TMU, "sclk_tmu", "fin_pll", ENABLE_SCLK_PERIS, 10, 0, 0),
+};
+
+static struct samsung_cmu_info peris_cmu_info __initdata = {
+       .mux_clks               = peris_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(peris_mux_clks),
+       .gate_clks              = peris_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(peris_gate_clks),
+       .nr_clk_ids             = PERIS_NR_CLK,
+       .clk_regs               = peris_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(peris_clk_regs),
+};
+
+static void __init exynos7_clk_peris_init(struct device_node *np)
+{
+       samsung_cmu_register_one(np, &peris_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_peris, "samsung,exynos7-clock-peris",
+       exynos7_clk_peris_init);
+
+/* Register Offset definitions for CMU_FSYS0 (0x10E90000) */
+#define MUX_SEL_FSYS00                 0x0200
+#define MUX_SEL_FSYS01                 0x0204
+#define ENABLE_ACLK_FSYS01             0x0804
+
+/*
+ * List of parent clocks for Muxes in CMU_FSYS0
+ */
+PNAME(mout_aclk_fsys0_200_p)   = { "fin_pll", "dout_aclk_fsys0_200" };
+PNAME(mout_sclk_mmc2_p)                = { "fin_pll", "sclk_mmc2" };
+
+static unsigned long fsys0_clk_regs[] __initdata = {
+       MUX_SEL_FSYS00,
+       MUX_SEL_FSYS01,
+       ENABLE_ACLK_FSYS01,
+};
+
+static struct samsung_mux_clock fsys0_mux_clks[] __initdata = {
+       MUX(0, "mout_aclk_fsys0_200_user", mout_aclk_fsys0_200_p,
+               MUX_SEL_FSYS00, 24, 1),
+
+       MUX(0, "mout_sclk_mmc2_user", mout_sclk_mmc2_p, MUX_SEL_FSYS01, 24, 1),
+};
+
+static struct samsung_gate_clock fsys0_gate_clks[] __initdata = {
+       GATE(ACLK_MMC2, "aclk_mmc2", "mout_aclk_fsys0_200_user",
+               ENABLE_ACLK_FSYS01, 31, 0, 0),
+};
+
+static struct samsung_cmu_info fsys0_cmu_info __initdata = {
+       .mux_clks               = fsys0_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(fsys0_mux_clks),
+       .gate_clks              = fsys0_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(fsys0_gate_clks),
+       .nr_clk_ids             = TOP1_NR_CLK,
+       .clk_regs               = fsys0_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(fsys0_clk_regs),
+};
+
+static void __init exynos7_clk_fsys0_init(struct device_node *np)
+{
+       samsung_cmu_register_one(np, &fsys0_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_fsys0, "samsung,exynos7-clock-fsys0",
+       exynos7_clk_fsys0_init);
+
+/* Register Offset definitions for CMU_FSYS1 (0x156E0000) */
+#define MUX_SEL_FSYS10                 0x0200
+#define MUX_SEL_FSYS11                 0x0204
+#define ENABLE_ACLK_FSYS1              0x0800
+
+/*
+ * List of parent clocks for Muxes in CMU_FSYS1
+ */
+PNAME(mout_aclk_fsys1_200_p)   = { "fin_pll",  "dout_aclk_fsys1_200" };
+PNAME(mout_sclk_mmc0_p)                = { "fin_pll", "sclk_mmc0" };
+PNAME(mout_sclk_mmc1_p)                = { "fin_pll", "sclk_mmc1" };
+
+static unsigned long fsys1_clk_regs[] __initdata = {
+       MUX_SEL_FSYS10,
+       MUX_SEL_FSYS11,
+       ENABLE_ACLK_FSYS1,
+};
+
+static struct samsung_mux_clock fsys1_mux_clks[] __initdata = {
+       MUX(0, "mout_aclk_fsys1_200_user", mout_aclk_fsys1_200_p,
+               MUX_SEL_FSYS10, 28, 1),
+
+       MUX(0, "mout_sclk_mmc1_user", mout_sclk_mmc1_p, MUX_SEL_FSYS11, 24, 1),
+       MUX(0, "mout_sclk_mmc0_user", mout_sclk_mmc0_p, MUX_SEL_FSYS11, 28, 1),
+};
+
+static struct samsung_gate_clock fsys1_gate_clks[] __initdata = {
+       GATE(ACLK_MMC1, "aclk_mmc1", "mout_aclk_fsys1_200_user",
+               ENABLE_ACLK_FSYS1, 29, 0, 0),
+       GATE(ACLK_MMC0, "aclk_mmc0", "mout_aclk_fsys1_200_user",
+               ENABLE_ACLK_FSYS1, 30, 0, 0),
+};
+
+static struct samsung_cmu_info fsys1_cmu_info __initdata = {
+       .mux_clks               = fsys1_mux_clks,
+       .nr_mux_clks            = ARRAY_SIZE(fsys1_mux_clks),
+       .gate_clks              = fsys1_gate_clks,
+       .nr_gate_clks           = ARRAY_SIZE(fsys1_gate_clks),
+       .nr_clk_ids             = TOP1_NR_CLK,
+       .clk_regs               = fsys1_clk_regs,
+       .nr_clk_regs            = ARRAY_SIZE(fsys1_clk_regs),
+};
+
+static void __init exynos7_clk_fsys1_init(struct device_node *np)
+{
+       samsung_cmu_register_one(np, &fsys1_cmu_info);
+}
+
+CLK_OF_DECLARE(exynos7_clk_fsys1, "samsung,exynos7-clock-fsys1",
+       exynos7_clk_fsys1_init);
index b07fad2a916703731731f40b5db3aa678e835b17..9d70e5c03804cee247ee54ff4faed549bbf7ea12 100644 (file)
@@ -482,6 +482,8 @@ static const struct clk_ops samsung_pll45xx_clk_min_ops = {
 
 #define PLL46XX_VSEL_MASK      (1)
 #define PLL46XX_MDIV_MASK      (0x1FF)
+#define PLL1460X_MDIV_MASK     (0x3FF)
+
 #define PLL46XX_PDIV_MASK      (0x3F)
 #define PLL46XX_SDIV_MASK      (0x7)
 #define PLL46XX_VSEL_SHIFT     (27)
@@ -511,13 +513,15 @@ static unsigned long samsung_pll46xx_recalc_rate(struct clk_hw *hw,
 
        pll_con0 = __raw_readl(pll->con_reg);
        pll_con1 = __raw_readl(pll->con_reg + 4);
-       mdiv = (pll_con0 >> PLL46XX_MDIV_SHIFT) & PLL46XX_MDIV_MASK;
+       mdiv = (pll_con0 >> PLL46XX_MDIV_SHIFT) & ((pll->type == pll_1460x) ?
+                               PLL1460X_MDIV_MASK : PLL46XX_MDIV_MASK);
        pdiv = (pll_con0 >> PLL46XX_PDIV_SHIFT) & PLL46XX_PDIV_MASK;
        sdiv = (pll_con0 >> PLL46XX_SDIV_SHIFT) & PLL46XX_SDIV_MASK;
        kdiv = pll->type == pll_4650c ? pll_con1 & PLL4650C_KDIV_MASK :
                                        pll_con1 & PLL46XX_KDIV_MASK;
 
-       shift = pll->type == pll_4600 ? 16 : 10;
+       shift = ((pll->type == pll_4600) || (pll->type == pll_1460x)) ? 16 : 10;
+
        fvco *= (mdiv << shift) + kdiv;
        do_div(fvco, (pdiv << sdiv));
        fvco >>= shift;
@@ -573,14 +577,21 @@ static int samsung_pll46xx_set_rate(struct clk_hw *hw, unsigned long drate,
                lock = 0xffff;
 
        /* Set PLL PMS and VSEL values. */
-       con0 &= ~((PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT) |
+       if (pll->type == pll_1460x) {
+               con0 &= ~((PLL1460X_MDIV_MASK << PLL46XX_MDIV_SHIFT) |
+                       (PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT) |
+                       (PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT));
+       } else {
+               con0 &= ~((PLL46XX_MDIV_MASK << PLL46XX_MDIV_SHIFT) |
                        (PLL46XX_PDIV_MASK << PLL46XX_PDIV_SHIFT) |
                        (PLL46XX_SDIV_MASK << PLL46XX_SDIV_SHIFT) |
                        (PLL46XX_VSEL_MASK << PLL46XX_VSEL_SHIFT));
+               con0 |= rate->vsel << PLL46XX_VSEL_SHIFT;
+       }
+
        con0 |= (rate->mdiv << PLL46XX_MDIV_SHIFT) |
                        (rate->pdiv << PLL46XX_PDIV_SHIFT) |
-                       (rate->sdiv << PLL46XX_SDIV_SHIFT) |
-                       (rate->vsel << PLL46XX_VSEL_SHIFT);
+                       (rate->sdiv << PLL46XX_SDIV_SHIFT);
 
        /* Set PLL K, MFR and MRR values. */
        con1 = __raw_readl(pll->con_reg + 0x4);
@@ -1190,6 +1201,9 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
        /* clk_ops for 35xx and 2550 are similar */
        case pll_35xx:
        case pll_2550:
+       case pll_1450x:
+       case pll_1451x:
+       case pll_1452x:
                if (!pll->rate_table)
                        init.ops = &samsung_pll35xx_clk_min_ops;
                else
@@ -1223,6 +1237,7 @@ static void __init _samsung_clk_register_pll(struct samsung_clk_provider *ctx,
        case pll_4600:
        case pll_4650:
        case pll_4650c:
+       case pll_1460x:
                if (!pll->rate_table)
                        init.ops = &samsung_pll46xx_clk_min_ops;
                else
index c0ed4d41fd90d96adb8f3c465b71e016c7bf787e..213de9af8b4f2ea34e9258ce9df3373f9571f712 100644 (file)
@@ -33,6 +33,10 @@ enum samsung_pll_type {
        pll_s3c2440_mpll,
        pll_2550xx,
        pll_2650xx,
+       pll_1450x,
+       pll_1451x,
+       pll_1452x,
+       pll_1460x,
 };
 
 #define PLL_35XX_RATE(_rate, _m, _p, _s)                       \
index deab84d9f37deec161070fc372f6756a90369a43..4bda54095a16a0f3a1a670d4e949a4267f411041 100644 (file)
  * clock framework for Samsung platforms.
 */
 
+#include <linux/of_address.h>
 #include <linux/syscore_ops.h>
+
 #include "clk.h"
 
+static LIST_HEAD(clock_reg_cache_list);
+
 void samsung_clk_save(void __iomem *base,
                                    struct samsung_clk_reg_dump *rd,
                                    unsigned int num_regs)
@@ -281,7 +285,6 @@ void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx,
  * obtain the clock speed of all external fixed clock sources from device
  * tree and register it
  */
-#ifdef CONFIG_OF
 void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
                        struct samsung_fixed_rate_clock *fixed_rate_clk,
                        unsigned int nr_fixed_rate_clk,
@@ -298,7 +301,6 @@ void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx,
        }
        samsung_clk_register_fixed_rate(ctx, fixed_rate_clk, nr_fixed_rate_clk);
 }
-#endif
 
 /* utility function to get the rate of a specified clock */
 unsigned long _get_rate(const char *clk_name)
@@ -313,3 +315,99 @@ unsigned long _get_rate(const char *clk_name)
 
        return clk_get_rate(clk);
 }
+
+#ifdef CONFIG_PM_SLEEP
+static int samsung_clk_suspend(void)
+{
+       struct samsung_clock_reg_cache *reg_cache;
+
+       list_for_each_entry(reg_cache, &clock_reg_cache_list, node)
+               samsung_clk_save(reg_cache->reg_base, reg_cache->rdump,
+                               reg_cache->rd_num);
+       return 0;
+}
+
+static void samsung_clk_resume(void)
+{
+       struct samsung_clock_reg_cache *reg_cache;
+
+       list_for_each_entry(reg_cache, &clock_reg_cache_list, node)
+               samsung_clk_restore(reg_cache->reg_base, reg_cache->rdump,
+                               reg_cache->rd_num);
+}
+
+static struct syscore_ops samsung_clk_syscore_ops = {
+       .suspend = samsung_clk_suspend,
+       .resume = samsung_clk_resume,
+};
+
+static void samsung_clk_sleep_init(void __iomem *reg_base,
+               const unsigned long *rdump,
+               unsigned long nr_rdump)
+{
+       struct samsung_clock_reg_cache *reg_cache;
+
+       reg_cache = kzalloc(sizeof(struct samsung_clock_reg_cache),
+                       GFP_KERNEL);
+       if (!reg_cache)
+               panic("could not allocate register reg_cache.\n");
+       reg_cache->rdump = samsung_clk_alloc_reg_dump(rdump, nr_rdump);
+
+       if (!reg_cache->rdump)
+               panic("could not allocate register dump storage.\n");
+
+       if (list_empty(&clock_reg_cache_list))
+               register_syscore_ops(&samsung_clk_syscore_ops);
+
+       reg_cache->reg_base = reg_base;
+       reg_cache->rd_num = nr_rdump;
+       list_add_tail(&reg_cache->node, &clock_reg_cache_list);
+}
+
+#else
+static void samsung_clk_sleep_init(void __iomem *reg_base,
+               const unsigned long *rdump,
+               unsigned long nr_rdump) {}
+#endif
+
+/*
+ * Common function which registers plls, muxes, dividers and gates
+ * for each CMU. It also adds the CMU register list to the register cache.
+ */
+void __init samsung_cmu_register_one(struct device_node *np,
+                       struct samsung_cmu_info *cmu)
+{
+       void __iomem *reg_base;
+       struct samsung_clk_provider *ctx;
+
+       reg_base = of_iomap(np, 0);
+       if (!reg_base)
+               panic("%s: failed to map registers\n", __func__);
+
+       ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids);
+       if (!ctx)
+               panic("%s: unable to alllocate ctx\n", __func__);
+
+       if (cmu->pll_clks)
+               samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks,
+                       reg_base);
+       if (cmu->mux_clks)
+               samsung_clk_register_mux(ctx, cmu->mux_clks,
+                       cmu->nr_mux_clks);
+       if (cmu->div_clks)
+               samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks);
+       if (cmu->gate_clks)
+               samsung_clk_register_gate(ctx, cmu->gate_clks,
+                       cmu->nr_gate_clks);
+       if (cmu->fixed_clks)
+               samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks,
+                       cmu->nr_fixed_clks);
+       if (cmu->fixed_factor_clks)
+               samsung_clk_register_fixed_factor(ctx, cmu->fixed_factor_clks,
+                       cmu->nr_fixed_factor_clks);
+       if (cmu->clk_regs)
+               samsung_clk_sleep_init(reg_base, cmu->clk_regs,
+                       cmu->nr_clk_regs);
+
+       samsung_clk_of_add_provider(np, ctx);
+}
index 66ab36b5cef1018de647434cc0848f549b546ab2..8acabe1f32c4d4f13264eb26088b237778becf6b 100644 (file)
 #ifndef __SAMSUNG_CLK_H
 #define __SAMSUNG_CLK_H
 
-#include <linux/clk.h>
 #include <linux/clkdev.h>
-#include <linux/io.h>
 #include <linux/clk-provider.h>
-#include <linux/of.h>
-#include <linux/of_address.h>
 #include "clk-pll.h"
 
 /**
  * struct samsung_clk_provider: information about clock provider
  * @reg_base: virtual address for the register base.
  * @clk_data: holds clock related data like clk* and number of clocks.
- * @lock: maintains exclusion bwtween callbacks for a given clock-provider.
+ * @lock: maintains exclusion between callbacks for a given clock-provider.
  */
 struct samsung_clk_provider {
        void __iomem *reg_base;
@@ -324,6 +320,40 @@ struct samsung_pll_clock {
        __PLL(_typ, _id, NULL, _name, _pname, CLK_GET_RATE_NOCACHE,     \
                _lock, _con, _rtable, _alias)
 
+struct samsung_clock_reg_cache {
+       struct list_head node;
+       void __iomem *reg_base;
+       struct samsung_clk_reg_dump *rdump;
+       unsigned int rd_num;
+};
+
+struct samsung_cmu_info {
+       /* list of pll clocks and respective count */
+       struct samsung_pll_clock *pll_clks;
+       unsigned int nr_pll_clks;
+       /* list of mux clocks and respective count */
+       struct samsung_mux_clock *mux_clks;
+       unsigned int nr_mux_clks;
+       /* list of div clocks and respective count */
+       struct samsung_div_clock *div_clks;
+       unsigned int nr_div_clks;
+       /* list of gate clocks and respective count */
+       struct samsung_gate_clock *gate_clks;
+       unsigned int nr_gate_clks;
+       /* list of fixed clocks and respective count */
+       struct samsung_fixed_rate_clock *fixed_clks;
+       unsigned int nr_fixed_clks;
+       /* list of fixed factor clocks and respective count */
+       struct samsung_fixed_factor_clock *fixed_factor_clks;
+       unsigned int nr_fixed_factor_clks;
+       /* total number of clocks with IDs assigned */
+       unsigned int nr_clk_ids;
+
+       /* list and number of clock registers */
+       unsigned long *clk_regs;
+       unsigned int nr_clk_regs;
+};
+
 extern struct samsung_clk_provider *__init samsung_clk_init(
                        struct device_node *np, void __iomem *base,
                        unsigned long nr_clks);
@@ -362,6 +392,9 @@ extern void __init samsung_clk_register_pll(struct samsung_clk_provider *ctx,
                        struct samsung_pll_clock *pll_list,
                        unsigned int nr_clk, void __iomem *base);
 
+extern void __init samsung_cmu_register_one(struct device_node *,
+                       struct samsung_cmu_info *);
+
 extern unsigned long _get_rate(const char *clk_name);
 
 extern void samsung_clk_save(void __iomem *base,
index f065f694cb65699705def0036c3f47e903e813ae..639241e31e03ec244907761ef430ed95bc53adde 100644 (file)
@@ -32,6 +32,9 @@ struct div6_clock {
        struct clk_hw hw;
        void __iomem *reg;
        unsigned int div;
+       u32 src_shift;
+       u32 src_width;
+       u8 *parents;
 };
 
 #define to_div6_clock(_hw) container_of(_hw, struct div6_clock, hw)
@@ -39,8 +42,11 @@ struct div6_clock {
 static int cpg_div6_clock_enable(struct clk_hw *hw)
 {
        struct div6_clock *clock = to_div6_clock(hw);
+       u32 val;
 
-       clk_writel(CPG_DIV6_DIV(clock->div - 1), clock->reg);
+       val = (clk_readl(clock->reg) & ~(CPG_DIV6_DIV_MASK | CPG_DIV6_CKSTP))
+           | CPG_DIV6_DIV(clock->div - 1);
+       clk_writel(val, clock->reg);
 
        return 0;
 }
@@ -52,7 +58,7 @@ static void cpg_div6_clock_disable(struct clk_hw *hw)
        /* DIV6 clocks require the divisor field to be non-zero when stopping
         * the clock.
         */
-       clk_writel(CPG_DIV6_CKSTP | CPG_DIV6_DIV(CPG_DIV6_DIV_MASK),
+       clk_writel(clk_readl(clock->reg) | CPG_DIV6_CKSTP | CPG_DIV6_DIV_MASK,
                   clock->reg);
 }
 
@@ -94,12 +100,53 @@ static int cpg_div6_clock_set_rate(struct clk_hw *hw, unsigned long rate,
 {
        struct div6_clock *clock = to_div6_clock(hw);
        unsigned int div = cpg_div6_clock_calc_div(rate, parent_rate);
+       u32 val;
 
        clock->div = div;
 
+       val = clk_readl(clock->reg) & ~CPG_DIV6_DIV_MASK;
        /* Only program the new divisor if the clock isn't stopped. */
-       if (!(clk_readl(clock->reg) & CPG_DIV6_CKSTP))
-               clk_writel(CPG_DIV6_DIV(clock->div - 1), clock->reg);
+       if (!(val & CPG_DIV6_CKSTP))
+               clk_writel(val | CPG_DIV6_DIV(clock->div - 1), clock->reg);
+
+       return 0;
+}
+
+static u8 cpg_div6_clock_get_parent(struct clk_hw *hw)
+{
+       struct div6_clock *clock = to_div6_clock(hw);
+       unsigned int i;
+       u8 hw_index;
+
+       if (clock->src_width == 0)
+               return 0;
+
+       hw_index = (clk_readl(clock->reg) >> clock->src_shift) &
+                  (BIT(clock->src_width) - 1);
+       for (i = 0; i < __clk_get_num_parents(hw->clk); i++) {
+               if (clock->parents[i] == hw_index)
+                       return i;
+       }
+
+       pr_err("%s: %s DIV6 clock set to invalid parent %u\n",
+              __func__, __clk_get_name(hw->clk), hw_index);
+       return 0;
+}
+
+static int cpg_div6_clock_set_parent(struct clk_hw *hw, u8 index)
+{
+       struct div6_clock *clock = to_div6_clock(hw);
+       u8 hw_index;
+       u32 mask;
+
+       if (index >= __clk_get_num_parents(hw->clk))
+               return -EINVAL;
+
+       mask = ~((BIT(clock->src_width) - 1) << clock->src_shift);
+       hw_index = clock->parents[index];
+
+       clk_writel((clk_readl(clock->reg) & mask) |
+               (hw_index << clock->src_shift), clock->reg);
 
        return 0;
 }
@@ -108,6 +155,8 @@ static const struct clk_ops cpg_div6_clock_ops = {
        .enable = cpg_div6_clock_enable,
        .disable = cpg_div6_clock_disable,
        .is_enabled = cpg_div6_clock_is_enabled,
+       .get_parent = cpg_div6_clock_get_parent,
+       .set_parent = cpg_div6_clock_set_parent,
        .recalc_rate = cpg_div6_clock_recalc_rate,
        .round_rate = cpg_div6_clock_round_rate,
        .set_rate = cpg_div6_clock_set_rate,
@@ -115,20 +164,33 @@ static const struct clk_ops cpg_div6_clock_ops = {
 
 static void __init cpg_div6_clock_init(struct device_node *np)
 {
+       unsigned int num_parents, valid_parents;
+       const char **parent_names;
        struct clk_init_data init;
        struct div6_clock *clock;
-       const char *parent_name;
        const char *name;
        struct clk *clk;
+       unsigned int i;
        int ret;
 
        clock = kzalloc(sizeof(*clock), GFP_KERNEL);
-       if (!clock) {
-               pr_err("%s: failed to allocate %s DIV6 clock\n",
+       if (!clock)
+               return;
+
+       num_parents = of_clk_get_parent_count(np);
+       if (num_parents < 1) {
+               pr_err("%s: no parent found for %s DIV6 clock\n",
                       __func__, np->name);
                return;
        }
 
+       clock->parents = kmalloc_array(num_parents, sizeof(*clock->parents),
+               GFP_KERNEL);
+       parent_names = kmalloc_array(num_parents, sizeof(*parent_names),
+                               GFP_KERNEL);
+       if (!parent_names)
+               return;
+
        /* Remap the clock register and read the divisor. Disabling the
         * clock overwrites the divisor, so we need to cache its value for the
         * enable operation.
@@ -150,9 +212,34 @@ static void __init cpg_div6_clock_init(struct device_node *np)
                goto error;
        }
 
-       parent_name = of_clk_get_parent_name(np, 0);
-       if (parent_name == NULL) {
-               pr_err("%s: failed to get %s DIV6 clock parent name\n",
+
+       for (i = 0, valid_parents = 0; i < num_parents; i++) {
+               const char *name = of_clk_get_parent_name(np, i);
+
+               if (name) {
+                       parent_names[valid_parents] = name;
+                       clock->parents[valid_parents] = i;
+                       valid_parents++;
+               }
+       }
+
+       switch (num_parents) {
+       case 1:
+               /* fixed parent clock */
+               clock->src_shift = clock->src_width = 0;
+               break;
+       case 4:
+               /* clock with EXSRC bits 6-7 */
+               clock->src_shift = 6;
+               clock->src_width = 2;
+               break;
+       case 8:
+               /* VCLK with EXSRC bits 12-14 */
+               clock->src_shift = 12;
+               clock->src_width = 3;
+               break;
+       default:
+               pr_err("%s: invalid number of parents for DIV6 clock %s\n",
                       __func__, np->name);
                goto error;
        }
@@ -161,8 +248,8 @@ static void __init cpg_div6_clock_init(struct device_node *np)
        init.name = name;
        init.ops = &cpg_div6_clock_ops;
        init.flags = CLK_IS_BASIC;
-       init.parent_names = &parent_name;
-       init.num_parents = 1;
+       init.parent_names = parent_names;
+       init.num_parents = valid_parents;
 
        clock->hw.init = &init;
 
@@ -175,11 +262,13 @@ static void __init cpg_div6_clock_init(struct device_node *np)
 
        of_clk_add_provider(np, of_clk_src_simple_get, clk);
 
+       kfree(parent_names);
        return;
 
 error:
        if (clock->reg)
                iounmap(clock->reg);
+       kfree(parent_names);
        kfree(clock);
 }
 CLK_OF_DECLARE(cpg_div6_clk, "renesas,cpg-div6-clock", cpg_div6_clock_init);
index 7ddc2b5538466737fbdc0e05ca3c58a153094b26..a66953c0f43094a4b96a816fb0d4ff37bfedf466 100644 (file)
@@ -7,6 +7,7 @@ obj-y += clk-a10-hosc.o
 obj-y += clk-a20-gmac.o
 obj-y += clk-mod0.o
 obj-y += clk-sun8i-mbus.o
+obj-y += clk-sun9i-core.o
 
 obj-$(CONFIG_MFD_SUN6I_PRCM) += \
        clk-sun6i-ar100.o clk-sun6i-apb0.o clk-sun6i-apb0-gates.o \
index 5296fd6dd7b3f9a9c7103b3fd7ae63b1f0259963..0dcf4f205fb86f1510dfa8b74f20311e301613ce 100644 (file)
@@ -53,6 +53,11 @@ static DEFINE_SPINLOCK(gmac_lock);
 #define SUN7I_A20_GMAC_MASK    0x3
 #define SUN7I_A20_GMAC_PARENTS 2
 
+static u32 sun7i_a20_gmac_mux_table[SUN7I_A20_GMAC_PARENTS] = {
+       0x00, /* Select mii_phy_tx_clk */
+       0x02, /* Select gmac_int_tx_clk */
+};
+
 static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
 {
        struct clk *clk;
@@ -90,7 +95,7 @@ static void __init sun7i_a20_gmac_clk_setup(struct device_node *node)
        gate->lock = &gmac_lock;
        mux->reg = reg;
        mux->mask = SUN7I_A20_GMAC_MASK;
-       mux->flags = CLK_MUX_INDEX_BIT;
+       mux->table = sun7i_a20_gmac_mux_table;
        mux->lock = &gmac_lock;
 
        clk = clk_register_composite(NULL, clk_name,
index f83ba097126c6e72c22517a66f17034474e8ebf2..62e08fb58554cbe8d4bf8f1c5a08f08bbd58ec68 100644 (file)
@@ -81,7 +81,7 @@ static long clk_factors_round_rate(struct clk_hw *hw, unsigned long rate,
 
 static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
                                       unsigned long *best_parent_rate,
-                                      struct clk **best_parent_p)
+                                      struct clk_hw **best_parent_p)
 {
        struct clk *clk = hw->clk, *parent, *best_parent = NULL;
        int i, num_parents;
@@ -108,7 +108,7 @@ static long clk_factors_determine_rate(struct clk_hw *hw, unsigned long rate,
        }
 
        if (best_parent)
-               *best_parent_p = best_parent;
+               *best_parent_p = __clk_get_hw(best_parent);
        *best_parent_rate = best;
 
        return best_child_rate;
@@ -224,7 +224,7 @@ struct clk * __init sunxi_factors_register(struct device_node *node,
                /* set up gate properties */
                mux->reg = reg;
                mux->shift = data->mux;
-               mux->mask = SUNXI_FACTORS_MUX_MASK;
+               mux->mask = data->muxmask;
                mux->lock = factors->lock;
                mux_hw = &mux->hw;
        }
index 9913840018d3fdb4f4717a4636b68f8dc6f4a6e6..912238fde1324224863035da8a5836b5c276e8a0 100644 (file)
@@ -7,8 +7,6 @@
 
 #define SUNXI_FACTORS_NOT_APPLICABLE   (0)
 
-#define SUNXI_FACTORS_MUX_MASK 0x3
-
 struct clk_factors_config {
        u8 nshift;
        u8 nwidth;
@@ -24,6 +22,7 @@ struct clk_factors_config {
 struct factors_data {
        int enable;
        int mux;
+       int muxmask;
        struct clk_factors_config *table;
        void (*getter) (u32 *rate, u32 parent_rate, u8 *n, u8 *k, u8 *m, u8 *p);
        const char *name;
index 4a563850ee6ee3a158a286b8a1cfb37ed713569b..da0524eaee9406aff6c2d73f8b56b82c12360a16 100644 (file)
@@ -70,6 +70,7 @@ static struct clk_factors_config sun4i_a10_mod0_config = {
 static const struct factors_data sun4i_a10_mod0_data __initconst = {
        .enable = 31,
        .mux = 24,
+       .muxmask = BIT(1) | BIT(0),
        .table = &sun4i_a10_mod0_config,
        .getter = sun4i_a10_get_mod0_factors,
 };
index acca53290be26c8f5c6aea33ee92b7d7f590703d..3d282fb8f85cc204d84c182aa4b16ae2181ea5e9 100644 (file)
@@ -46,7 +46,7 @@ static unsigned long ar100_recalc_rate(struct clk_hw *hw,
 
 static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
                                 unsigned long *best_parent_rate,
-                                struct clk **best_parent_clk)
+                                struct clk_hw **best_parent_clk)
 {
        int nparents = __clk_get_num_parents(hw->clk);
        long best_rate = -EINVAL;
@@ -100,7 +100,7 @@ static long ar100_determine_rate(struct clk_hw *hw, unsigned long rate,
 
                tmp_rate = (parent_rate >> shift) / div;
                if (!*best_parent_clk || tmp_rate > best_rate) {
-                       *best_parent_clk = parent;
+                       *best_parent_clk = __clk_get_hw(parent);
                        *best_parent_rate = parent_rate;
                        best_rate = tmp_rate;
                }
index 8e49b44cee41d9df81ffa666cf812a3fffcdd7af..ef49786eefd3caa5b6f856287635a973213a14b3 100644 (file)
@@ -60,6 +60,7 @@ static struct clk_factors_config sun8i_a23_mbus_config = {
 static const struct factors_data sun8i_a23_mbus_data __initconst = {
        .enable = 31,
        .mux = 24,
+       .muxmask = BIT(1) | BIT(0),
        .table = &sun8i_a23_mbus_config,
        .getter = sun8i_a23_get_mbus_factors,
 };
diff --git a/drivers/clk/sunxi/clk-sun9i-core.c b/drivers/clk/sunxi/clk-sun9i-core.c
new file mode 100644 (file)
index 0000000..3cb9036
--- /dev/null
@@ -0,0 +1,271 @@
+/*
+ * Copyright 2014 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/log2.h>
+
+#include "clk-factors.h"
+
+
+/**
+ * sun9i_a80_get_pll4_factors() - calculates n, p, m factors for PLL4
+ * PLL4 rate is calculated as follows
+ * rate = (parent_rate * n >> p) / (m + 1);
+ * parent_rate is always 24 MHz
+ *
+ * p and m are named div1 and div2 in Allwinner's SDK
+ */
+
+static void sun9i_a80_get_pll4_factors(u32 *freq, u32 parent_rate,
+                                      u8 *n, u8 *k, u8 *m, u8 *p)
+{
+       int div;
+
+       /* Normalize value to a 6M multiple */
+       div = DIV_ROUND_UP(*freq, 6000000);
+
+       /* divs above 256 cannot be odd */
+       if (div > 256)
+               div = round_up(div, 2);
+
+       /* divs above 512 must be a multiple of 4 */
+       if (div > 512)
+               div = round_up(div, 4);
+
+       *freq = 6000000 * div;
+
+       /* we were called to round the frequency, we can now return */
+       if (n == NULL)
+               return;
+
+       /* p will be 1 for divs under 512 */
+       if (div < 512)
+               *p = 1;
+       else
+               *p = 0;
+
+       /* m will be 1 if div is odd */
+       if (div & 1)
+               *m = 1;
+       else
+               *m = 0;
+
+       /* calculate a suitable n based on m and p */
+       *n = div / (*p + 1) / (*m + 1);
+}
+
+static struct clk_factors_config sun9i_a80_pll4_config = {
+       .mshift = 18,
+       .mwidth = 1,
+       .nshift = 8,
+       .nwidth = 8,
+       .pshift = 16,
+       .pwidth = 1,
+};
+
+static const struct factors_data sun9i_a80_pll4_data __initconst = {
+       .enable = 31,
+       .table = &sun9i_a80_pll4_config,
+       .getter = sun9i_a80_get_pll4_factors,
+};
+
+static DEFINE_SPINLOCK(sun9i_a80_pll4_lock);
+
+static void __init sun9i_a80_pll4_setup(struct device_node *node)
+{
+       sunxi_factors_register(node, &sun9i_a80_pll4_data, &sun9i_a80_pll4_lock);
+}
+CLK_OF_DECLARE(sun9i_a80_pll4, "allwinner,sun9i-a80-pll4-clk", sun9i_a80_pll4_setup);
+
+
+/**
+ * sun9i_a80_get_gt_factors() - calculates m factor for GT
+ * GT rate is calculated as follows
+ * rate = parent_rate / (m + 1);
+ */
+
+static void sun9i_a80_get_gt_factors(u32 *freq, u32 parent_rate,
+                                    u8 *n, u8 *k, u8 *m, u8 *p)
+{
+       u32 div;
+
+       if (parent_rate < *freq)
+               *freq = parent_rate;
+
+       div = DIV_ROUND_UP(parent_rate, *freq);
+
+       /* maximum divider is 4 */
+       if (div > 4)
+               div = 4;
+
+       *freq = parent_rate / div;
+
+       /* we were called to round the frequency, we can now return */
+       if (!m)
+               return;
+
+       *m = div;
+}
+
+static struct clk_factors_config sun9i_a80_gt_config = {
+       .mshift = 0,
+       .mwidth = 2,
+};
+
+static const struct factors_data sun9i_a80_gt_data __initconst = {
+       .mux = 24,
+       .muxmask = BIT(1) | BIT(0),
+       .table = &sun9i_a80_gt_config,
+       .getter = sun9i_a80_get_gt_factors,
+};
+
+static DEFINE_SPINLOCK(sun9i_a80_gt_lock);
+
+static void __init sun9i_a80_gt_setup(struct device_node *node)
+{
+       struct clk *gt = sunxi_factors_register(node, &sun9i_a80_gt_data,
+                                               &sun9i_a80_gt_lock);
+
+       /* The GT bus clock needs to be always enabled */
+       __clk_get(gt);
+       clk_prepare_enable(gt);
+}
+CLK_OF_DECLARE(sun9i_a80_gt, "allwinner,sun9i-a80-gt-clk", sun9i_a80_gt_setup);
+
+
+/**
+ * sun9i_a80_get_ahb_factors() - calculates p factor for AHB0/1/2
+ * AHB rate is calculated as follows
+ * rate = parent_rate >> p;
+ */
+
+static void sun9i_a80_get_ahb_factors(u32 *freq, u32 parent_rate,
+                                     u8 *n, u8 *k, u8 *m, u8 *p)
+{
+       u32 _p;
+
+       if (parent_rate < *freq)
+               *freq = parent_rate;
+
+       _p = order_base_2(DIV_ROUND_UP(parent_rate, *freq));
+
+       /* maximum p is 3 */
+       if (_p > 3)
+               _p = 3;
+
+       *freq = parent_rate >> _p;
+
+       /* we were called to round the frequency, we can now return */
+       if (!p)
+               return;
+
+       *p = _p;
+}
+
+static struct clk_factors_config sun9i_a80_ahb_config = {
+       .pshift = 0,
+       .pwidth = 2,
+};
+
+static const struct factors_data sun9i_a80_ahb_data __initconst = {
+       .mux = 24,
+       .muxmask = BIT(1) | BIT(0),
+       .table = &sun9i_a80_ahb_config,
+       .getter = sun9i_a80_get_ahb_factors,
+};
+
+static DEFINE_SPINLOCK(sun9i_a80_ahb_lock);
+
+static void __init sun9i_a80_ahb_setup(struct device_node *node)
+{
+       sunxi_factors_register(node, &sun9i_a80_ahb_data, &sun9i_a80_ahb_lock);
+}
+CLK_OF_DECLARE(sun9i_a80_ahb, "allwinner,sun9i-a80-ahb-clk", sun9i_a80_ahb_setup);
+
+
+static const struct factors_data sun9i_a80_apb0_data __initconst = {
+       .mux = 24,
+       .muxmask = BIT(0),
+       .table = &sun9i_a80_ahb_config,
+       .getter = sun9i_a80_get_ahb_factors,
+};
+
+static DEFINE_SPINLOCK(sun9i_a80_apb0_lock);
+
+static void __init sun9i_a80_apb0_setup(struct device_node *node)
+{
+       sunxi_factors_register(node, &sun9i_a80_apb0_data, &sun9i_a80_apb0_lock);
+}
+CLK_OF_DECLARE(sun9i_a80_apb0, "allwinner,sun9i-a80-apb0-clk", sun9i_a80_apb0_setup);
+
+
+/**
+ * sun9i_a80_get_apb1_factors() - calculates m, p factors for APB1
+ * APB1 rate is calculated as follows
+ * rate = (parent_rate >> p) / (m + 1);
+ */
+
+static void sun9i_a80_get_apb1_factors(u32 *freq, u32 parent_rate,
+                                      u8 *n, u8 *k, u8 *m, u8 *p)
+{
+       u32 div;
+       u8 calcm, calcp;
+
+       if (parent_rate < *freq)
+               *freq = parent_rate;
+
+       div = DIV_ROUND_UP(parent_rate, *freq);
+
+       /* Highest possible divider is 256 (p = 3, m = 31) */
+       if (div > 256)
+               div = 256;
+
+       calcp = order_base_2(div);
+       calcm = (parent_rate >> calcp) - 1;
+       *freq = (parent_rate >> calcp) / (calcm + 1);
+
+       /* we were called to round the frequency, we can now return */
+       if (n == NULL)
+               return;
+
+       *m = calcm;
+       *p = calcp;
+}
+
+static struct clk_factors_config sun9i_a80_apb1_config = {
+       .mshift = 0,
+       .mwidth = 5,
+       .pshift = 16,
+       .pwidth = 2,
+};
+
+static const struct factors_data sun9i_a80_apb1_data __initconst = {
+       .mux = 24,
+       .muxmask = BIT(0),
+       .table = &sun9i_a80_apb1_config,
+       .getter = sun9i_a80_get_apb1_factors,
+};
+
+static DEFINE_SPINLOCK(sun9i_a80_apb1_lock);
+
+static void __init sun9i_a80_apb1_setup(struct device_node *node)
+{
+       sunxi_factors_register(node, &sun9i_a80_apb1_data, &sun9i_a80_apb1_lock);
+}
+CLK_OF_DECLARE(sun9i_a80_apb1, "allwinner,sun9i-a80-apb1-clk", sun9i_a80_apb1_setup);
index d5dc951264cab957bcf2522bc61a9c62395c6432..570202582dcfef88bcd721f99250e6a82bfb3493 100644 (file)
@@ -245,9 +245,9 @@ static void sun4i_get_pll5_factors(u32 *freq, u32 parent_rate,
 }
 
 /**
- * sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6
- * PLL6 rate is calculated as follows
- * rate = parent_rate * n * (k + 1) / 2
+ * sun6i_a31_get_pll6_factors() - calculates n, k factors for A31 PLL6x2
+ * PLL6x2 rate is calculated as follows
+ * rate = parent_rate * (n + 1) * (k + 1)
  * parent_rate is always 24Mhz
  */
 
@@ -256,13 +256,7 @@ static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
 {
        u8 div;
 
-       /*
-        * We always have 24MHz / 2, so we can just say that our
-        * parent clock is 12MHz.
-        */
-       parent_rate = parent_rate / 2;
-
-       /* Normalize value to a parent_rate multiple (24M / 2) */
+       /* Normalize value to a parent_rate multiple (24M) */
        div = *freq / parent_rate;
        *freq = parent_rate * div;
 
@@ -274,7 +268,7 @@ static void sun6i_a31_get_pll6_factors(u32 *freq, u32 parent_rate,
        if (*k > 3)
                *k = 3;
 
-       *n = DIV_ROUND_UP(div, (*k+1));
+       *n = DIV_ROUND_UP(div, (*k+1)) - 1;
 }
 
 /**
@@ -445,6 +439,7 @@ static struct clk_factors_config sun6i_a31_pll6_config = {
        .nwidth = 5,
        .kshift = 4,
        .kwidth = 2,
+       .n_start = 1,
 };
 
 static struct clk_factors_config sun4i_apb1_config = {
@@ -504,9 +499,12 @@ static const struct factors_data sun6i_a31_pll6_data __initconst = {
        .enable = 31,
        .table = &sun6i_a31_pll6_config,
        .getter = sun6i_a31_get_pll6_factors,
+       .name = "pll6x2",
 };
 
 static const struct factors_data sun4i_apb1_data __initconst = {
+       .mux = 24,
+       .muxmask = BIT(1) | BIT(0),
        .table = &sun4i_apb1_config,
        .getter = sun4i_get_apb1_factors,
 };
@@ -514,6 +512,7 @@ static const struct factors_data sun4i_apb1_data __initconst = {
 static const struct factors_data sun7i_a20_out_data __initconst = {
        .enable = 31,
        .mux = 24,
+       .muxmask = BIT(1) | BIT(0),
        .table = &sun7i_a20_out_config,
        .getter = sun7i_a20_get_out_factors,
 };
@@ -544,10 +543,6 @@ static const struct mux_data sun6i_a31_ahb1_mux_data __initconst = {
        .shift = 12,
 };
 
-static const struct mux_data sun4i_apb1_mux_data __initconst = {
-       .shift = 24,
-};
-
 static void __init sunxi_mux_clk_setup(struct device_node *node,
                                       struct mux_data *data)
 {
@@ -633,12 +628,6 @@ static const struct div_data sun4i_apb0_data __initconst = {
        .table  = sun4i_apb0_table,
 };
 
-static const struct div_data sun6i_a31_apb2_div_data __initconst = {
-       .shift  = 0,
-       .pow    = 0,
-       .width  = 4,
-};
-
 static void __init sunxi_divider_clk_setup(struct device_node *node,
                                           struct div_data *data)
 {
@@ -757,6 +746,18 @@ static const struct gates_data sun8i_a23_ahb1_gates_data __initconst = {
        .mask = {0x25386742, 0x2505111},
 };
 
+static const struct gates_data sun9i_a80_ahb0_gates_data __initconst = {
+       .mask = {0xF5F12B},
+};
+
+static const struct gates_data sun9i_a80_ahb1_gates_data __initconst = {
+       .mask = {0x1E20003},
+};
+
+static const struct gates_data sun9i_a80_ahb2_gates_data __initconst = {
+       .mask = {0x9B7},
+};
+
 static const struct gates_data sun4i_apb0_gates_data __initconst = {
        .mask = {0x4EF},
 };
@@ -773,6 +774,10 @@ static const struct gates_data sun7i_a20_apb0_gates_data __initconst = {
        .mask = { 0x4ff },
 };
 
+static const struct gates_data sun9i_a80_apb0_gates_data __initconst = {
+       .mask = {0xEB822},
+};
+
 static const struct gates_data sun4i_apb1_gates_data __initconst = {
        .mask = {0xFF00F7},
 };
@@ -801,6 +806,10 @@ static const struct gates_data sun7i_a20_apb1_gates_data __initconst = {
        .mask = { 0xff80ff },
 };
 
+static const struct gates_data sun9i_a80_apb1_gates_data __initconst = {
+       .mask = {0x3F001F},
+};
+
 static const struct gates_data sun8i_a23_apb2_gates_data __initconst = {
        .mask = {0x1F0007},
 };
@@ -893,6 +902,7 @@ static void __init sunxi_gates_clk_setup(struct device_node *node,
 
 struct divs_data {
        const struct factors_data *factors; /* data for the factor clock */
+       int ndivs; /* number of children */
        struct {
                u8 fixed; /* is it a fixed divisor? if not... */
                struct clk_div_table *table; /* is it a table based divisor? */
@@ -912,6 +922,7 @@ static struct clk_div_table pll6_sata_tbl[] = {
 
 static const struct divs_data pll5_divs_data __initconst = {
        .factors = &sun4i_pll5_data,
+       .ndivs = 2,
        .div = {
                { .shift = 0, .pow = 0, }, /* M, DDR */
                { .shift = 16, .pow = 1, }, /* P, other */
@@ -920,12 +931,21 @@ static const struct divs_data pll5_divs_data __initconst = {
 
 static const struct divs_data pll6_divs_data __initconst = {
        .factors = &sun4i_pll6_data,
+       .ndivs = 2,
        .div = {
                { .shift = 0, .table = pll6_sata_tbl, .gate = 14 }, /* M, SATA */
                { .fixed = 2 }, /* P, other */
        }
 };
 
+static const struct divs_data sun6i_a31_pll6_divs_data __initconst = {
+       .factors = &sun6i_a31_pll6_data,
+       .ndivs = 1,
+       .div = {
+               { .fixed = 2 }, /* normal output */
+       }
+};
+
 /**
  * sunxi_divs_clk_setup() - Setup function for leaf divisors on clocks
  *
@@ -950,7 +970,7 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
        struct clk_fixed_factor *fix_factor;
        struct clk_divider *divider;
        void __iomem *reg;
-       int i = 0;
+       int ndivs = SUNXI_DIVS_MAX_QTY, i = 0;
        int flags, clkflags;
 
        /* Set up factor clock that we will be dividing */
@@ -973,7 +993,11 @@ static void __init sunxi_divs_clk_setup(struct device_node *node,
         * our RAM clock! */
        clkflags = !strcmp("pll5", parent) ? 0 : CLK_SET_RATE_PARENT;
 
-       for (i = 0; i < SUNXI_DIVS_MAX_QTY; i++) {
+       /* if number of children known, use it */
+       if (data->ndivs)
+               ndivs = data->ndivs;
+
+       for (i = 0; i < ndivs; i++) {
                if (of_property_read_string_index(node, "clock-output-names",
                                                  i, &clk_name) != 0)
                        break;
@@ -1062,7 +1086,6 @@ static const struct of_device_id clk_factors_match[] __initconst = {
        {.compatible = "allwinner,sun6i-a31-pll1-clk", .data = &sun6i_a31_pll1_data,},
        {.compatible = "allwinner,sun8i-a23-pll1-clk", .data = &sun8i_a23_pll1_data,},
        {.compatible = "allwinner,sun7i-a20-pll4-clk", .data = &sun7i_a20_pll4_data,},
-       {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_data,},
        {.compatible = "allwinner,sun4i-a10-apb1-clk", .data = &sun4i_apb1_data,},
        {.compatible = "allwinner,sun7i-a20-out-clk", .data = &sun7i_a20_out_data,},
        {}
@@ -1074,7 +1097,6 @@ static const struct of_device_id clk_div_match[] __initconst = {
        {.compatible = "allwinner,sun8i-a23-axi-clk", .data = &sun8i_a23_axi_data,},
        {.compatible = "allwinner,sun4i-a10-ahb-clk", .data = &sun4i_ahb_data,},
        {.compatible = "allwinner,sun4i-a10-apb0-clk", .data = &sun4i_apb0_data,},
-       {.compatible = "allwinner,sun6i-a31-apb2-div-clk", .data = &sun6i_a31_apb2_div_data,},
        {}
 };
 
@@ -1082,13 +1104,13 @@ static const struct of_device_id clk_div_match[] __initconst = {
 static const struct of_device_id clk_divs_match[] __initconst = {
        {.compatible = "allwinner,sun4i-a10-pll5-clk", .data = &pll5_divs_data,},
        {.compatible = "allwinner,sun4i-a10-pll6-clk", .data = &pll6_divs_data,},
+       {.compatible = "allwinner,sun6i-a31-pll6-clk", .data = &sun6i_a31_pll6_divs_data,},
        {}
 };
 
 /* Matches for mux clocks */
 static const struct of_device_id clk_mux_match[] __initconst = {
        {.compatible = "allwinner,sun4i-a10-cpu-clk", .data = &sun4i_cpu_mux_data,},
-       {.compatible = "allwinner,sun4i-a10-apb1-mux-clk", .data = &sun4i_apb1_mux_data,},
        {.compatible = "allwinner,sun6i-a31-ahb1-mux-clk", .data = &sun6i_a31_ahb1_mux_data,},
        {}
 };
@@ -1102,16 +1124,21 @@ static const struct of_device_id clk_gates_match[] __initconst = {
        {.compatible = "allwinner,sun6i-a31-ahb1-gates-clk", .data = &sun6i_a31_ahb1_gates_data,},
        {.compatible = "allwinner,sun7i-a20-ahb-gates-clk", .data = &sun7i_a20_ahb_gates_data,},
        {.compatible = "allwinner,sun8i-a23-ahb1-gates-clk", .data = &sun8i_a23_ahb1_gates_data,},
+       {.compatible = "allwinner,sun9i-a80-ahb0-gates-clk", .data = &sun9i_a80_ahb0_gates_data,},
+       {.compatible = "allwinner,sun9i-a80-ahb1-gates-clk", .data = &sun9i_a80_ahb1_gates_data,},
+       {.compatible = "allwinner,sun9i-a80-ahb2-gates-clk", .data = &sun9i_a80_ahb2_gates_data,},
        {.compatible = "allwinner,sun4i-a10-apb0-gates-clk", .data = &sun4i_apb0_gates_data,},
        {.compatible = "allwinner,sun5i-a10s-apb0-gates-clk", .data = &sun5i_a10s_apb0_gates_data,},
        {.compatible = "allwinner,sun5i-a13-apb0-gates-clk", .data = &sun5i_a13_apb0_gates_data,},
        {.compatible = "allwinner,sun7i-a20-apb0-gates-clk", .data = &sun7i_a20_apb0_gates_data,},
+       {.compatible = "allwinner,sun9i-a80-apb0-gates-clk", .data = &sun9i_a80_apb0_gates_data,},
        {.compatible = "allwinner,sun4i-a10-apb1-gates-clk", .data = &sun4i_apb1_gates_data,},
        {.compatible = "allwinner,sun5i-a10s-apb1-gates-clk", .data = &sun5i_a10s_apb1_gates_data,},
        {.compatible = "allwinner,sun5i-a13-apb1-gates-clk", .data = &sun5i_a13_apb1_gates_data,},
        {.compatible = "allwinner,sun6i-a31-apb1-gates-clk", .data = &sun6i_a31_apb1_gates_data,},
        {.compatible = "allwinner,sun7i-a20-apb1-gates-clk", .data = &sun7i_a20_apb1_gates_data,},
        {.compatible = "allwinner,sun8i-a23-apb1-gates-clk", .data = &sun8i_a23_apb1_gates_data,},
+       {.compatible = "allwinner,sun9i-a80-apb1-gates-clk", .data = &sun9i_a80_apb1_gates_data,},
        {.compatible = "allwinner,sun6i-a31-apb2-gates-clk", .data = &sun6i_a31_apb2_gates_data,},
        {.compatible = "allwinner,sun8i-a23-apb2-gates-clk", .data = &sun8i_a23_apb2_gates_data,},
        {.compatible = "allwinner,sun4i-a10-usb-clk", .data = &sun4i_a10_usb_gates_data,},
@@ -1200,3 +1227,9 @@ static void __init sun6i_init_clocks(struct device_node *node)
 }
 CLK_OF_DECLARE(sun6i_a31_clk_init, "allwinner,sun6i-a31", sun6i_init_clocks);
 CLK_OF_DECLARE(sun8i_a23_clk_init, "allwinner,sun8i-a23", sun6i_init_clocks);
+
+static void __init sun9i_init_clocks(struct device_node *node)
+{
+       sunxi_init_clocks(NULL, 0);
+}
+CLK_OF_DECLARE(sun9i_a80_clk_init, "allwinner,sun9i-a80", sun9i_init_clocks);
index f56147a1daed54a2e7fe4be9c983e46db95ecfc6..fde97d6e31d6d9749698aaf91bfae821f1a72f9f 100644 (file)
@@ -211,6 +211,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
        /* OPPs might be populated at runtime, don't check for error here */
        of_init_opp_table(cpu_dev);
 
+       /*
+        * But we need OPP table to function so if it is not there let's
+        * give platform code chance to provide it for us.
+        */
+       ret = dev_pm_opp_get_opp_count(cpu_dev);
+       if (ret <= 0) {
+               pr_debug("OPP table is not ready, deferring probe\n");
+               ret = -EPROBE_DEFER;
+               goto out_free_opp;
+       }
+
        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                ret = -ENOMEM;
index a09a29c312a9cbeef8cb43a2862c07f13c78a24e..46bed4f81cde882e8f1d3b3dbd1b9418b3de2ee5 100644 (file)
@@ -2028,6 +2028,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
        /* Don't start any governor operations if we are entering suspend */
        if (cpufreq_suspended)
                return 0;
+       /*
+        * Governor might not be initiated here if ACPI _PPC changed
+        * notification happened, so check it.
+        */
+       if (!policy->governor)
+               return -EINVAL;
 
        if (policy->governor->max_transition_latency &&
            policy->cpuinfo.transition_latency >
index c913906a719ee0f9b44598593b4900c201395596..0f6b229afcb9e621eb116f6ce805a619ddfe96b3 100644 (file)
@@ -1,5 +1,5 @@
 /*
- *  (C) 2001-2004  Dave Jones. <davej@redhat.com>
+ *  (C) 2001-2004  Dave Jones.
  *  (C) 2002  Padraig Brady. <padraig@antefacto.com>
  *
  *  Licensed under the terms of the GNU GPL License version 2.
@@ -1008,7 +1008,7 @@ MODULE_PARM_DESC(revid_errata, "Ignore CPU Revision ID");
 module_param(enable, int, 0644);
 MODULE_PARM_DESC(enable, "Enable driver");
 
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
 MODULE_DESCRIPTION("Longhaul driver for VIA Cyrix processors.");
 MODULE_LICENSE("GPL");
 
index f91027259c3ce7ab84da0db0feb8997c50824bf8..e6f24b281e3edcb5289202e5f149e88d9e5a92e6 100644 (file)
@@ -300,7 +300,7 @@ static void __exit powernow_k6_exit(void)
 }
 
 
-MODULE_AUTHOR("Arjan van de Ven, Dave Jones <davej@redhat.com>, "
+MODULE_AUTHOR("Arjan van de Ven, Dave Jones, "
                "Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
 MODULE_LICENSE("GPL");
index e61e224475ad457fd71b6934b2a5256a2d96fcc0..37c5742482d8cf4b5a58218cc84f36135184bb06 100644 (file)
@@ -1,7 +1,6 @@
 /*
  *  AMD K7 Powernow driver.
  *  (C) 2003 Dave Jones on behalf of SuSE Labs.
- *  (C) 2003-2004 Dave Jones <davej@redhat.com>
  *
  *  Licensed under the terms of the GNU GPL License version 2.
  *  Based upon datasheets & sample CPUs kindly provided by AMD.
@@ -701,7 +700,7 @@ static void __exit powernow_exit(void)
 module_param(acpi_force,  int, 0444);
 MODULE_PARM_DESC(acpi_force, "Force ACPI to be used.");
 
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
 MODULE_DESCRIPTION("Powernow driver for AMD K7 processors.");
 MODULE_LICENSE("GPL");
 
index 1a07b5904ed55c1504c65e3a7f49061b78c9ea62..e56d632a8b2107be82c7fffe94c8cf57d02aaca1 100644 (file)
@@ -378,8 +378,7 @@ static void __exit speedstep_exit(void)
 }
 
 
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>, "
-               "Dominik Brodowski <linux@brodo.de>");
+MODULE_AUTHOR("Dave Jones, Dominik Brodowski <linux@brodo.de>");
 MODULE_DESCRIPTION("Speedstep driver for Intel mobile processors on chipsets "
                "with ICH-M southbridges.");
 MODULE_LICENSE("GPL");
index e9248bb9173ae267ff09c14d06128527352f9f20..aedec09579340b2db42095e3acebcbc4776543c8 100644 (file)
 
 #include <asm/machdep.h>
 #include <asm/firmware.h>
+#include <asm/opal.h>
 #include <asm/runlatch.h>
 
-/* Flags and constants used in PowerNV platform */
-
 #define MAX_POWERNV_IDLE_STATES        8
-#define IDLE_USE_INST_NAP      0x00010000 /* Use nap instruction */
-#define IDLE_USE_INST_SLEEP    0x00020000 /* Use sleep instruction */
 
 struct cpuidle_driver powernv_idle_driver = {
        .name             = "powernv_idle",
@@ -197,7 +194,7 @@ static int powernv_add_idle_states(void)
                 * target residency to be 10x exit_latency
                 */
                latency_ns = be32_to_cpu(idle_state_latency[i]);
-               if (flags & IDLE_USE_INST_NAP) {
+               if (flags & OPAL_PM_NAP_ENABLED) {
                        /* Add NAP state */
                        strcpy(powernv_states[nr_idle_states].name, "Nap");
                        strcpy(powernv_states[nr_idle_states].desc, "Nap");
@@ -210,7 +207,8 @@ static int powernv_add_idle_states(void)
                        nr_idle_states++;
                }
 
-               if (flags & IDLE_USE_INST_SLEEP) {
+               if (flags & OPAL_PM_SLEEP_ENABLED ||
+                       flags & OPAL_PM_SLEEP_ENABLED_ER1) {
                        /* Add FASTSLEEP state */
                        strcpy(powernv_states[nr_idle_states].name, "FastSleep");
                        strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
index 37263d9a105127079cd71baed298467495000dd6..401c0106ed345eda469a590aa345f88d8ff59eca 100644 (file)
@@ -79,12 +79,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
 
        last_state = &ldev->states[last_idx];
 
-       if (!(drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_INVALID)) {
-               last_residency = cpuidle_get_last_residency(dev) - \
-                                        drv->states[last_idx].exit_latency;
-       }
-       else
-               last_residency = last_state->threshold.promotion_time + 1;
+       last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
 
        /* consider promotion */
        if (last_idx < drv->state_count - 1 &&
index 659d7b0c9ebfd1e78348d539f497112c36f2cdc5..40580794e23dc00f4d086a696c803ed35b4c0f25 100644 (file)
@@ -396,8 +396,8 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * power state and occurrence of the wakeup event.
         *
         * If the entered idle state didn't support residency measurements,
-        * we are basically lost in the dark how much time passed.
-        * As a compromise, assume we slept for the whole expected time.
+        * we use them anyway if they are short, and if long,
+        * truncate to the whole expected time.
         *
         * Any measured amount of time will include the exit latency.
         * Since we are interested in when the wakeup begun, not when it
@@ -405,22 +405,17 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         * the measured amount of time is less than the exit latency,
         * assume the state was never reached and the exit latency is 0.
         */
-       if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) {
-               /* Use timer value as is */
-               measured_us = data->next_timer_us;
 
-       } else {
-               /* Use measured value */
-               measured_us = cpuidle_get_last_residency(dev);
+       /* measured value */
+       measured_us = cpuidle_get_last_residency(dev);
 
-               /* Deduct exit latency */
-               if (measured_us > target->exit_latency)
-                       measured_us -= target->exit_latency;
+       /* Deduct exit latency */
+       if (measured_us > target->exit_latency)
+               measured_us -= target->exit_latency;
 
-               /* Make sure our coefficients do not exceed unity */
-               if (measured_us > data->next_timer_us)
-                       measured_us = data->next_timer_us;
-       }
+       /* Make sure our coefficients do not exceed unity */
+       if (measured_us > data->next_timer_us)
+               measured_us = data->next_timer_us;
 
        /* Update our correction ratio */
        new_factor = data->correction_factor[data->bucket];
index 4f7b275f2f7b0fdbb488055057c3291a1aab3a70..7d4974b83af7821649c7eff1f75ee4d0b05758e6 100644 (file)
@@ -121,13 +121,9 @@ static int kfd_open(struct inode *inode, struct file *filep)
        if (IS_ERR(process))
                return PTR_ERR(process);
 
-       process->is_32bit_user_mode = is_32bit_user_mode;
-
        dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
                process->pasid, process->is_32bit_user_mode);
 
-       kfd_init_apertures(process);
-
        return 0;
 }
 
index 66df4da01c29a200d64a6a801d8d250231d7d269..e64aa99e5e416349071f3c0906be347ed42e3e53 100644 (file)
@@ -299,13 +299,13 @@ int kfd_init_apertures(struct kfd_process *process)
        struct kfd_dev *dev;
        struct kfd_process_device *pdd;
 
-       mutex_lock(&process->mutex);
-
        /*Iterating over all devices*/
        while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
                id < NUM_OF_SUPPORTED_GPUS) {
 
                pdd = kfd_get_process_device_data(dev, process, 1);
+               if (!pdd)
+                       return -1;
 
                /*
                 * For 64 bit process aperture will be statically reserved in
@@ -348,8 +348,6 @@ int kfd_init_apertures(struct kfd_process *process)
                id++;
        }
 
-       mutex_unlock(&process->mutex);
-
        return 0;
 }
 
index b85eb0b830b41d5d894efebb2a3e6fab83ca7552..3c76ef05cbcf798483b482ac09b7bc62223d92c5 100644 (file)
@@ -26,6 +26,8 @@
 #include <linux/slab.h>
 #include <linux/amd-iommu.h>
 #include <linux/notifier.h>
+#include <linux/compat.h>
+
 struct mm_struct;
 
 #include "kfd_priv.h"
@@ -285,8 +287,15 @@ static struct kfd_process *create_process(const struct task_struct *thread)
        if (err != 0)
                goto err_process_pqm_init;
 
+       /* init process apertures*/
+       process->is_32bit_user_mode = is_compat_task();
+       if (kfd_init_apertures(process) != 0)
+               goto err_init_apretures;
+
        return process;
 
+err_init_apretures:
+       pqm_uninit(&process->pqm);
 err_process_pqm_init:
        hash_del_rcu(&process->kfd_processes);
        synchronize_rcu();
index 5733e2859e8aabbc361ce821f5e337c0008dd6a3..b11792d7e70e2a6fca1fdc15636c4672fb3165dc 100644 (file)
@@ -700,8 +700,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                                dev->node_props.simd_per_cu);
                sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
                                dev->node_props.max_slots_scratch_cu);
-               sysfs_show_32bit_prop(buffer, "engine_id",
-                               dev->node_props.engine_id);
                sysfs_show_32bit_prop(buffer, "vendor_id",
                                dev->node_props.vendor_id);
                sysfs_show_32bit_prop(buffer, "device_id",
@@ -715,6 +713,12 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                                                dev->gpu->kgd));
                        sysfs_show_64bit_prop(buffer, "local_mem_size",
                                        kfd2kgd->get_vmem_size(dev->gpu->kgd));
+
+                       sysfs_show_32bit_prop(buffer, "fw_version",
+                                       kfd2kgd->get_fw_version(
+                                                       dev->gpu->kgd,
+                                                       KGD_ENGINE_MEC1));
+
                }
 
                ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
index 9c729dd8dd50a077d35f77ed3646438fb80d5c86..47b551970a14aed723b24f7d2662293fa8914dff 100644 (file)
@@ -45,6 +45,17 @@ enum kgd_memory_pool {
        KGD_POOL_FRAMEBUFFER = 3,
 };
 
+enum kgd_engine_type {
+       KGD_ENGINE_PFP = 1,
+       KGD_ENGINE_ME,
+       KGD_ENGINE_CE,
+       KGD_ENGINE_MEC1,
+       KGD_ENGINE_MEC2,
+       KGD_ENGINE_RLC,
+       KGD_ENGINE_SDMA,
+       KGD_ENGINE_MAX
+};
+
 struct kgd2kfd_shared_resources {
        /* Bit n == 1 means VMID n is available for KFD. */
        unsigned int compute_vmid_bitmap;
@@ -137,6 +148,8 @@ struct kgd2kfd_calls {
  *
  * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
  *
+ * @get_fw_version: Returns FW versions from the header
+ *
  * This structure contains function pointers to services that the kgd driver
  * provides to amdkfd driver.
  *
@@ -176,6 +189,8 @@ struct kfd2kgd_calls {
        int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
                                unsigned int timeout, uint32_t pipe_id,
                                uint32_t queue_id);
+       uint16_t (*get_fw_version)(struct kgd_dev *kgd,
+                               enum kgd_engine_type type);
 };
 
 bool kgd2kfd_init(unsigned interface_version,
index 4a78a773151ce71f464d36cc4d1d6d887eea1619..bbdbe4721573a92667fe3989bbc1e0be8123bc2b 100644 (file)
@@ -61,7 +61,7 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
        struct drm_crtc_state *crtc_state;
 
        if (plane->state->crtc) {
-               crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)];
+               crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
 
                if (WARN_ON(!crtc_state))
                        return;
index f5a5f18efa5bf230d810d8c031bb30cfddd0390a..4d79dad9d44fad0ac48b2b81177ffda8430b390b 100644 (file)
@@ -830,6 +830,8 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
  * vblank events since the system was booted, including lost events due to
  * modesetting activity.
  *
+ * This is the legacy version of drm_crtc_vblank_count().
+ *
  * Returns:
  * The software vblank counter.
  */
@@ -843,6 +845,25 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
 }
 EXPORT_SYMBOL(drm_vblank_count);
 
+/**
+ * drm_crtc_vblank_count - retrieve "cooked" vblank counter value
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ *
+ * This is the native KMS version of drm_vblank_count().
+ *
+ * Returns:
+ * The software vblank counter.
+ */
+u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
+{
+       return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_count);
+
 /**
  * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
  * and the system timestamp corresponding to that vblank counter value.
@@ -904,6 +925,8 @@ static void send_vblank_event(struct drm_device *dev,
  *
  * Updates sequence # and timestamp on event, and sends it to userspace.
  * Caller must hold event lock.
+ *
+ * This is the legacy version of drm_crtc_send_vblank_event().
  */
 void drm_send_vblank_event(struct drm_device *dev, int crtc,
                struct drm_pending_vblank_event *e)
@@ -922,6 +945,23 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
 }
 EXPORT_SYMBOL(drm_send_vblank_event);
 
+/**
+ * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ *
+ * This is the native KMS version of drm_send_vblank_event().
+ */
+void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+                               struct drm_pending_vblank_event *e)
+{
+       drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+}
+EXPORT_SYMBOL(drm_crtc_send_vblank_event);
+
 /**
  * drm_vblank_enable - enable the vblank interrupt on a CRTC
  * @dev: DRM device
@@ -1594,6 +1634,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
  *
  * Drivers should call this routine in their vblank interrupt handlers to
  * update the vblank counter and send any signals that may be pending.
+ *
+ * This is the legacy version of drm_crtc_handle_vblank().
  */
 bool drm_handle_vblank(struct drm_device *dev, int crtc)
 {
@@ -1670,3 +1712,21 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
        return true;
 }
 EXPORT_SYMBOL(drm_handle_vblank);
+
+/**
+ * drm_crtc_handle_vblank - handle a vblank event
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ *
+ * This is the native KMS version of drm_handle_vblank().
+ *
+ * Returns:
+ * True if the event was successfully handled, false on failure.
+ */
+bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
+{
+       return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_handle_vblank);
index f990ab4c3efbfbe1f91f15cc89eda8b5eb0fa64d..574057cd1d0986b6bc96819f67c72ba3f5280891 100644 (file)
@@ -811,6 +811,8 @@ int i915_reset(struct drm_device *dev)
        if (!i915.reset)
                return 0;
 
+       intel_reset_gt_powersave(dev);
+
        mutex_lock(&dev->struct_mutex);
 
        i915_gem_reset(dev);
@@ -880,7 +882,7 @@ int i915_reset(struct drm_device *dev)
                 * of re-init after reset.
                 */
                if (INTEL_INFO(dev)->gen > 5)
-                       intel_reset_gt_powersave(dev);
+                       intel_enable_gt_powersave(dev);
        } else {
                mutex_unlock(&dev->struct_mutex);
        }
@@ -1584,7 +1586,7 @@ static struct drm_driver driver = {
        .gem_prime_import = i915_gem_prime_import,
 
        .dumb_create = i915_gem_dumb_create,
-       .dumb_map_offset = i915_gem_dumb_map_offset,
+       .dumb_map_offset = i915_gem_mmap_gtt,
        .dumb_destroy = drm_gem_dumb_destroy,
        .ioctls = i915_ioctls,
        .fops = &i915_driver_fops,
index 63bcda5541ecad36179cf72491293694154b1a3e..70d0f0f06f1a65ccec340c0882f0481bf66903a5 100644 (file)
@@ -2501,9 +2501,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
-int i915_gem_dumb_map_offset(struct drm_file *file_priv,
-                            struct drm_device *dev, uint32_t handle,
-                            uint64_t *offset);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+                     uint32_t handle, uint64_t *offset);
 /**
  * Returns true if seq1 is later than seq2.
  */
index 4a9faea626dbf64eec08bf257b6d8a68fe7a7513..52adcb680be3a61113630493c6c5b98509965912 100644 (file)
@@ -401,7 +401,6 @@ static int
 i915_gem_create(struct drm_file *file,
                struct drm_device *dev,
                uint64_t size,
-               bool dumb,
                uint32_t *handle_p)
 {
        struct drm_i915_gem_object *obj;
@@ -417,7 +416,6 @@ i915_gem_create(struct drm_file *file,
        if (obj == NULL)
                return -ENOMEM;
 
-       obj->base.dumb = dumb;
        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(&obj->base);
@@ -437,7 +435,7 @@ i915_gem_dumb_create(struct drm_file *file,
        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, dev,
-                              args->size, true, &args->handle);
+                              args->size, &args->handle);
 }
 
 /**
@@ -450,7 +448,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_create *args = data;
 
        return i915_gem_create(file, dev,
-                              args->size, false, &args->handle);
+                              args->size, &args->handle);
 }
 
 static inline int
@@ -1840,10 +1838,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
        drm_gem_free_mmap_offset(&obj->base);
 }
 
-static int
+int
 i915_gem_mmap_gtt(struct drm_file *file,
                  struct drm_device *dev,
-                 uint32_t handle, bool dumb,
+                 uint32_t handle,
                  uint64_t *offset)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1860,13 +1858,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
                goto unlock;
        }
 
-       /*
-        * We don't allow dumb mmaps on objects created using another
-        * interface.
-        */
-       WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
-                 "Illegal dumb map of accelerated buffer.\n");
-
        if (obj->base.size > dev_priv->gtt.mappable_end) {
                ret = -E2BIG;
                goto out;
@@ -1891,15 +1882,6 @@ unlock:
        return ret;
 }
 
-int
-i915_gem_dumb_map_offset(struct drm_file *file,
-                        struct drm_device *dev,
-                        uint32_t handle,
-                        uint64_t *offset)
-{
-       return i915_gem_mmap_gtt(file, dev, handle, true, offset);
-}
-
 /**
  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  * @dev: DRM device
@@ -1921,7 +1903,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
 {
        struct drm_i915_gem_mmap_gtt *args = data;
 
-       return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
+       return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }
 
 static inline int
index d17ff435f2767fa31c51ed08ef5e3130aa4dddbd..d011ec82ef1ebfbb37b3c903f9d77b7e503a30f0 100644 (file)
@@ -473,7 +473,12 @@ mi_set_context(struct intel_engine_cs *ring,
               u32 hw_flags)
 {
        u32 flags = hw_flags | MI_MM_SPACE_GTT;
-       int ret;
+       const int num_rings =
+               /* Use an extended w/a on ivb+ if signalling from other rings */
+               i915_semaphore_is_enabled(ring->dev) ?
+               hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+               0;
+       int len, i, ret;
 
        /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
         * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
@@ -490,15 +495,31 @@ mi_set_context(struct intel_engine_cs *ring,
        if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
                flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
 
-       ret = intel_ring_begin(ring, 6);
+
+       len = 4;
+       if (INTEL_INFO(ring->dev)->gen >= 7)
+               len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+
+       ret = intel_ring_begin(ring, len);
        if (ret)
                return ret;
 
        /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-       if (INTEL_INFO(ring->dev)->gen >= 7)
+       if (INTEL_INFO(ring->dev)->gen >= 7) {
                intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
-       else
-               intel_ring_emit(ring, MI_NOOP);
+               if (num_rings) {
+                       struct intel_engine_cs *signaller;
+
+                       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+                       for_each_ring(signaller, to_i915(ring->dev), i) {
+                               if (signaller == ring)
+                                       continue;
+
+                               intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+                               intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+                       }
+               }
+       }
 
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_emit(ring, MI_SET_CONTEXT);
@@ -510,10 +531,21 @@ mi_set_context(struct intel_engine_cs *ring,
         */
        intel_ring_emit(ring, MI_NOOP);
 
-       if (INTEL_INFO(ring->dev)->gen >= 7)
+       if (INTEL_INFO(ring->dev)->gen >= 7) {
+               if (num_rings) {
+                       struct intel_engine_cs *signaller;
+
+                       intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+                       for_each_ring(signaller, to_i915(ring->dev), i) {
+                               if (signaller == ring)
+                                       continue;
+
+                               intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+                               intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+                       }
+               }
                intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
-       else
-               intel_ring_emit(ring, MI_NOOP);
+       }
 
        intel_ring_advance(ring);
 
index f06027ba3ee5512a7718deb22112f9fe6e16458b..11738316394af9b16669155dbbec74fde0f22223 100644 (file)
@@ -121,9 +121,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
                        goto err;
                }
 
-               WARN_ONCE(obj->base.dumb,
-                         "GPU use of dumb buffer is illegal.\n");
-
                drm_gem_object_reference(&obj->base);
                list_add_tail(&obj->obj_exec_link, &objects);
        }
index 981834b0f9b6309b4f09911c4406a58fadbe0287..996c2931c49945d86a595c6f38104c475fc32f1b 100644 (file)
@@ -281,10 +281,14 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
        struct drm_i915_private *dev_priv = dev->dev_private;
 
        spin_lock_irq(&dev_priv->irq_lock);
+
        WARN_ON(dev_priv->rps.pm_iir);
        WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
        dev_priv->rps.interrupts_enabled = true;
+       I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
+                               dev_priv->pm_rps_events);
        gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+
        spin_unlock_irq(&dev_priv->irq_lock);
 }
 
@@ -3307,8 +3311,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
        GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
 
        if (INTEL_INFO(dev)->gen >= 6) {
-               pm_irqs |= dev_priv->pm_rps_events;
-
+               /*
+                * RPS interrupts will get enabled/disabled on demand when RPS
+                * itself is enabled/disabled.
+                */
                if (HAS_VEBOX(dev))
                        pm_irqs |= PM_VEBOX_USER_INTERRUPT;
 
@@ -3520,7 +3526,11 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
        dev_priv->pm_irq_mask = 0xffffffff;
        GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
        GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
-       GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
+       /*
+        * RPS interrupts will get enabled/disabled on demand when RPS itself
+        * is enabled/disabled.
+        */
+       GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
        GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
 }
 
@@ -3609,7 +3619,7 @@ static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
 
        vlv_display_irq_reset(dev_priv);
 
-       dev_priv->irq_mask = 0;
+       dev_priv->irq_mask = ~0;
 }
 
 static void valleyview_irq_uninstall(struct drm_device *dev)
index eefdc238f70bd691d584f6d32b5d055f8d1546a5..172de3b3433b20b57d7e57f31ad63d3daa09bba3 100644 (file)
 #define   PIPE_CONTROL_STORE_DATA_INDEX                        (1<<21)
 #define   PIPE_CONTROL_CS_STALL                                (1<<20)
 #define   PIPE_CONTROL_TLB_INVALIDATE                  (1<<18)
+#define   PIPE_CONTROL_MEDIA_STATE_CLEAR               (1<<16)
 #define   PIPE_CONTROL_QW_WRITE                                (1<<14)
 #define   PIPE_CONTROL_POST_SYNC_OP_MASK                (3<<14)
 #define   PIPE_CONTROL_DEPTH_STALL                     (1<<13)
@@ -1128,6 +1129,7 @@ enum punit_power_well {
 #define GEN6_VERSYNC   (RING_SYNC_1(VEBOX_RING_BASE))
 #define GEN6_VEVSYNC   (RING_SYNC_2(VEBOX_RING_BASE))
 #define GEN6_NOSYNC 0
+#define RING_PSMI_CTL(base)    ((base)+0x50)
 #define RING_MAX_IDLE(base)    ((base)+0x54)
 #define RING_HWS_PGA(base)     ((base)+0x80)
 #define RING_HWS_PGA_GEN6(base)        ((base)+0x2080)
@@ -1458,6 +1460,7 @@ enum punit_power_well {
 #define   GEN6_BLITTER_FBC_NOTIFY                      (1<<3)
 
 #define GEN6_RC_SLEEP_PSMI_CONTROL     0x2050
+#define   GEN6_PSMI_SLEEP_MSG_DISABLE  (1 << 0)
 #define   GEN8_RC_SEMA_IDLE_MSG_DISABLE        (1 << 12)
 #define   GEN8_FF_DOP_CLOCK_GATE_DISABLE       (1<<10)
 
index 1f4b56e273c811a7c33149e927985b3f973ac4b3..964b28e3c6303e5db6ecd5458c42b90b27cdd11f 100644 (file)
@@ -6191,6 +6191,20 @@ void intel_cleanup_gt_powersave(struct drm_device *dev)
                valleyview_cleanup_gt_powersave(dev);
 }
 
+static void gen6_suspend_rps(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+       /*
+        * TODO: disable RPS interrupts on GEN9+ too once RPS support
+        * is added for it.
+        */
+       if (INTEL_INFO(dev)->gen < 9)
+               gen6_disable_rps_interrupts(dev);
+}
+
 /**
  * intel_suspend_gt_powersave - suspend PM work and helper threads
  * @dev: drm device
@@ -6206,14 +6220,7 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
        if (INTEL_INFO(dev)->gen < 6)
                return;
 
-       flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-       /*
-        * TODO: disable RPS interrupts on GEN9+ too once RPS support
-        * is added for it.
-        */
-       if (INTEL_INFO(dev)->gen < 9)
-               gen6_disable_rps_interrupts(dev);
+       gen6_suspend_rps(dev);
 
        /* Force GPU to min freq during suspend */
        gen6_rps_idle(dev_priv);
@@ -6316,8 +6323,11 @@ void intel_reset_gt_powersave(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       if (INTEL_INFO(dev)->gen < 6)
+               return;
+
+       gen6_suspend_rps(dev);
        dev_priv->rps.enabled = false;
-       intel_enable_gt_powersave(dev);
 }
 
 static void ibx_init_clock_gating(struct drm_device *dev)
index 9f445e9a75d1a1788669ab2969a2d769cf7d204e..c7bc93d28d84ec4356c0c5f5c4e4cd67296df709 100644 (file)
@@ -362,12 +362,15 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+               flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
                /*
                 * TLB invalidate requires a post-sync write.
                 */
                flags |= PIPE_CONTROL_QW_WRITE;
                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
 
+               flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
+
                /* Workaround: we must issue a pipe_control with CS-stall bit
                 * set before a pipe_control command that has the state cache
                 * invalidate bit set. */
index aa873048308b8e6e3e431152963aed5b04c13f02..94a5bee69fe724c94542bc5181e4309e78b78300 100644 (file)
@@ -386,9 +386,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
                        msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
                drm_gem_object_unreference(gpu->memptrs_bo);
        }
-       if (gpu->pm4)
-               release_firmware(gpu->pm4);
-       if (gpu->pfp)
-               release_firmware(gpu->pfp);
+       release_firmware(gpu->pm4);
+       release_firmware(gpu->pfp);
        msm_gpu_cleanup(&gpu->base);
 }
index fbebb0405d76d7df217c1772a26cc43e78578b29..b4e70e0e3cfa603b1ee8bbf393dd07402c96fbbb 100644 (file)
@@ -141,6 +141,15 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
        uint32_t hpd_ctrl;
        int i, ret;
 
+       for (i = 0; i < config->hpd_reg_cnt; i++) {
+               ret = regulator_enable(hdmi->hpd_regs[i]);
+               if (ret) {
+                       dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
+                                       config->hpd_reg_names[i], ret);
+                       goto fail;
+               }
+       }
+
        ret = gpio_config(hdmi, true);
        if (ret) {
                dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
@@ -164,15 +173,6 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
                }
        }
 
-       for (i = 0; i < config->hpd_reg_cnt; i++) {
-               ret = regulator_enable(hdmi->hpd_regs[i]);
-               if (ret) {
-                       dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
-                                       config->hpd_reg_names[i], ret);
-                       goto fail;
-               }
-       }
-
        hdmi_set_mode(hdmi, false);
        phy->funcs->reset(phy);
        hdmi_set_mode(hdmi, true);
@@ -200,7 +200,7 @@ fail:
        return ret;
 }
 
-static int hdp_disable(struct hdmi_connector *hdmi_connector)
+static void hdp_disable(struct hdmi_connector *hdmi_connector)
 {
        struct hdmi *hdmi = hdmi_connector->hdmi;
        const struct hdmi_platform_config *config = hdmi->config;
@@ -212,28 +212,19 @@ static int hdp_disable(struct hdmi_connector *hdmi_connector)
 
        hdmi_set_mode(hdmi, false);
 
-       for (i = 0; i < config->hpd_reg_cnt; i++) {
-               ret = regulator_disable(hdmi->hpd_regs[i]);
-               if (ret) {
-                       dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
-                                       config->hpd_reg_names[i], ret);
-                       goto fail;
-               }
-       }
-
        for (i = 0; i < config->hpd_clk_cnt; i++)
                clk_disable_unprepare(hdmi->hpd_clks[i]);
 
        ret = gpio_config(hdmi, false);
-       if (ret) {
-               dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
-               goto fail;
-       }
-
-       return 0;
+       if (ret)
+               dev_warn(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
 
-fail:
-       return ret;
+       for (i = 0; i < config->hpd_reg_cnt; i++) {
+               ret = regulator_disable(hdmi->hpd_regs[i]);
+               if (ret)
+                       dev_warn(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
+                                       config->hpd_reg_names[i], ret);
+       }
 }
 
 static void
@@ -260,11 +251,11 @@ void hdmi_connector_irq(struct drm_connector *connector)
                        (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
                bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
 
-               DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
-
-               /* ack the irq: */
+               /* ack & disable (temporarily) HPD events: */
                hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
-                               hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
+                       HDMI_HPD_INT_CTRL_INT_ACK);
+
+               DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
 
                /* detect disconnect if we are connected or visa versa: */
                hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
index a7672e100d8b5bd5f4ac4b782a3b1d0a38197a79..3449213f1e76424f7794754f3009de631b9bd00b 100644 (file)
@@ -331,17 +331,8 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
                struct drm_crtc_state *state)
 {
        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-       struct drm_device *dev = crtc->dev;
-
        DBG("%s: check", mdp4_crtc->name);
-
-       if (mdp4_crtc->event) {
-               dev_err(dev->dev, "already pending flip!\n");
-               return -EBUSY;
-       }
-
        // TODO anything else to check?
-
        return 0;
 }
 
@@ -357,7 +348,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        unsigned long flags;
 
-       DBG("%s: flush", mdp4_crtc->name);
+       DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);
 
        WARN_ON(mdp4_crtc->event);
 
index 0e9a2e3a82d76e1e104fd1e136d8924755706586..f021f960a8a27f7d002fd7be8a01edb722e7323a 100644 (file)
@@ -303,11 +303,6 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
 
        DBG("%s: check", mdp5_crtc->name);
 
-       if (mdp5_crtc->event) {
-               dev_err(dev->dev, "already pending flip!\n");
-               return -EBUSY;
-       }
-
        /* request a free CTL, if none is already allocated for this CRTC */
        if (state->enable && !mdp5_crtc->ctl) {
                mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
@@ -364,7 +359,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        unsigned long flags;
 
-       DBG("%s: flush", mdp5_crtc->name);
+       DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);
 
        WARN_ON(mdp5_crtc->event);
 
@@ -460,10 +455,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
        /* now that we know what irq's we want: */
        mdp5_crtc->err.irqmask = intf2err(intf);
        mdp5_crtc->vblank.irqmask = intf2vblank(intf);
-
-       /* when called from modeset_init(), skip the rest until later: */
-       if (!mdp5_kms)
-               return;
+       mdp_irq_update(&mdp5_kms->base);
 
        spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
        intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
index a11f1b80c488567d44755ad68eadc4d15dc61375..9f01a4f21af2fa969a58c0dc5e8e74a7346e227d 100644 (file)
@@ -216,17 +216,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
                goto fail;
        }
 
-       /* NOTE: the vsync and error irq's are actually associated with
-        * the INTF/encoder.. the easiest way to deal with this (ie. what
-        * we do now) is assume a fixed relationship between crtc's and
-        * encoders.  I'm not sure if there is ever a need to more freely
-        * assign crtcs to encoders, but if there is then we need to take
-        * care of error and vblank irq's that the crtc has registered,
-        * and also update user-requested vblank_mask.
-        */
-       encoder->possible_crtcs = BIT(0);
-       mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
-
+       encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;;
        priv->encoders[priv->num_encoders++] = encoder;
 
        /* Construct bridge/connector for HDMI: */
index 03455b64a2458e28fdcdb724c42865f6280001de..2a731722d8407de40be9d2bbd82d6cabb8f8c737 100644 (file)
@@ -42,7 +42,10 @@ static void update_irq(struct mdp_kms *mdp_kms)
        mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
 }
 
-static void update_irq_unlocked(struct mdp_kms *mdp_kms)
+/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
+ * link changes, this must be called to figure out the new global irqmask
+ */
+void mdp_irq_update(struct mdp_kms *mdp_kms)
 {
        unsigned long flags;
        spin_lock_irqsave(&list_lock, flags);
@@ -122,7 +125,7 @@ void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
        spin_unlock_irqrestore(&list_lock, flags);
 
        if (needs_update)
-               update_irq_unlocked(mdp_kms);
+               mdp_irq_update(mdp_kms);
 }
 
 void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
@@ -141,5 +144,5 @@ void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
        spin_unlock_irqrestore(&list_lock, flags);
 
        if (needs_update)
-               update_irq_unlocked(mdp_kms);
+               mdp_irq_update(mdp_kms);
 }
index 99557b5ad4fd35bedc3fd70b20571322571e9d76..b268ce95d3946fdf2f348250ce5cafeb15635b68 100644 (file)
@@ -75,7 +75,7 @@ void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
 void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
 void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
 void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
-
+void mdp_irq_update(struct mdp_kms *mdp_kms);
 
 /*
  * pixel format helpers:
index f0de412e13dc78bbc01e9b6dd7482c74358a14e9..191968256c5822ae0a7964db31377e5ba7b0e0ce 100644 (file)
@@ -23,10 +23,41 @@ struct msm_commit {
        struct drm_atomic_state *state;
        uint32_t fence;
        struct msm_fence_cb fence_cb;
+       uint32_t crtc_mask;
 };
 
 static void fence_cb(struct msm_fence_cb *cb);
 
+/* block until specified crtcs are no longer pending update, and
+ * atomically mark them as pending update
+ */
+static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+       int ret;
+
+       spin_lock(&priv->pending_crtcs_event.lock);
+       ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
+                       !(priv->pending_crtcs & crtc_mask));
+       if (ret == 0) {
+               DBG("start: %08x", crtc_mask);
+               priv->pending_crtcs |= crtc_mask;
+       }
+       spin_unlock(&priv->pending_crtcs_event.lock);
+
+       return ret;
+}
+
+/* clear specified crtcs (no longer pending update)
+ */
+static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+       spin_lock(&priv->pending_crtcs_event.lock);
+       DBG("end: %08x", crtc_mask);
+       priv->pending_crtcs &= ~crtc_mask;
+       wake_up_all_locked(&priv->pending_crtcs_event);
+       spin_unlock(&priv->pending_crtcs_event.lock);
+}
+
 static struct msm_commit *new_commit(struct drm_atomic_state *state)
 {
        struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
@@ -58,12 +89,27 @@ static void complete_commit(struct msm_commit *c)
 
        drm_atomic_helper_commit_post_planes(dev, state);
 
+       /* NOTE: _wait_for_vblanks() only waits for vblank on
+        * enabled CRTCs.  So we end up faulting when disabling
+        * due to (potentially) unref'ing the outgoing fb's
+        * before the vblank when the disable has latched.
+        *
+        * But if it did wait on disabled (or newly disabled)
+        * CRTCs, that would be racy (ie. we could have missed
+        * the irq.  We need some way to poll for pipe shut
+        * down.  Or just live with occasionally hitting the
+        * timeout in the CRTC disable path (which really should
+        * not be critical path)
+        */
+
        drm_atomic_helper_wait_for_vblanks(dev, state);
 
        drm_atomic_helper_cleanup_planes(dev, state);
 
        drm_atomic_state_free(state);
 
+       end_atomic(dev->dev_private, c->crtc_mask);
+
        kfree(c);
 }
 
@@ -97,8 +143,9 @@ static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
 int msm_atomic_commit(struct drm_device *dev,
                struct drm_atomic_state *state, bool async)
 {
-       struct msm_commit *c;
        int nplanes = dev->mode_config.num_total_plane;
+       int ncrtcs = dev->mode_config.num_crtc;
+       struct msm_commit *c;
        int i, ret;
 
        ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -106,6 +153,18 @@ int msm_atomic_commit(struct drm_device *dev,
                return ret;
 
        c = new_commit(state);
+       if (!c)
+               return -ENOMEM;
+
+       /*
+        * Figure out what crtcs we have:
+        */
+       for (i = 0; i < ncrtcs; i++) {
+               struct drm_crtc *crtc = state->crtcs[i];
+               if (!crtc)
+                       continue;
+               c->crtc_mask |= (1 << drm_crtc_index(crtc));
+       }
 
        /*
         * Figure out what fence to wait for:
@@ -121,6 +180,14 @@ int msm_atomic_commit(struct drm_device *dev,
                        add_fb(c, new_state->fb);
        }
 
+       /*
+        * Wait for pending updates on any of the same crtc's and then
+        * mark our set of crtc's as busy:
+        */
+       ret = start_atomic(dev->dev_private, c->crtc_mask);
+       if (ret)
+               return ret;
+
        /*
         * This is the point of no return - everything below never fails except
         * when the hw goes bonghits. Which means we can commit the new state on
index c795217e1bfcc6051b72afd05cb09cfbbff34c2c..9a61546a0b05276cd313b0877306b02930fa2fa5 100644 (file)
@@ -193,6 +193,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 
        priv->wq = alloc_ordered_workqueue("msm", 0);
        init_waitqueue_head(&priv->fence_event);
+       init_waitqueue_head(&priv->pending_crtcs_event);
 
        INIT_LIST_HEAD(&priv->inactive_list);
        INIT_LIST_HEAD(&priv->fence_cbs);
index 136303818436726004b2f294c6c90d69ccdd2f05..b69ef2d5a26c0a0afa9896ed02a88a087f9cebfe 100644 (file)
@@ -96,6 +96,10 @@ struct msm_drm_private {
        /* callbacks deferred until bo is inactive: */
        struct list_head fence_cbs;
 
+       /* crtcs pending async atomic updates: */
+       uint32_t pending_crtcs;
+       wait_queue_head_t pending_crtcs_event;
+
        /* registered MMUs: */
        unsigned int num_mmus;
        struct msm_mmu *mmus[NUM_DOMAINS];
index 94d55e526b4e0732cba7fcf45a2ce725c2433637..1f3af13ccede96b0aa9ad17a49aca3dbaa358872 100644 (file)
@@ -190,8 +190,7 @@ fail_unlock:
 fail:
 
        if (ret) {
-               if (fbi)
-                       framebuffer_release(fbi);
+               framebuffer_release(fbi);
                if (fb) {
                        drm_framebuffer_unregister_private(fb);
                        drm_framebuffer_remove(fb);
index 4a6f0e49d5b5f0709d586e3dcf4e271a0c99c0f5..49dea4fb55ac5fc0c6aa4efa082326a8c892e08b 100644 (file)
@@ -535,8 +535,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
                        drm_free_large(msm_obj->pages);
 
        } else {
-               if (msm_obj->vaddr)
-                       vunmap(msm_obj->vaddr);
+               vunmap(msm_obj->vaddr);
                put_pages(obj);
        }
 
index 5d93902a91ab038cfe040197f767c0de05d7931f..f8042433752b440d72fd9306009f55e972c68385 100644 (file)
@@ -876,7 +876,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
        if (ret)
                return ret;
 
-       bo->gem.dumb = true;
        ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
        drm_gem_object_unreference_unlocked(&bo->gem);
        return ret;
@@ -892,14 +891,6 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
        gem = drm_gem_object_lookup(dev, file_priv, handle);
        if (gem) {
                struct nouveau_bo *bo = nouveau_gem_object(gem);
-
-               /*
-                * We don't allow dumb mmaps on objects created using another
-                * interface.
-                */
-               WARN_ONCE(!(gem->dumb || gem->import_attach),
-                         "Illegal dumb map of accelerated buffer.\n");
-
                *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
                drm_gem_object_unreference_unlocked(gem);
                return 0;
index 28d51a22a4bf18b5729f8f4e9cd159c830cbbfca..42c34babc2e5b728959ad5c8135266f8b3c147e2 100644 (file)
@@ -444,9 +444,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
 
-               WARN_ONCE(nvbo->gem.dumb,
-                         "GPU use of dumb buffer is illegal.\n");
-
                ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
                                             b->write_domains,
                                             b->valid_domains);
index 753a6def61e7ce474d73b21b9affe84ae8b1a861..3d1cfcb96b6bfd9e7dfa50e282d6e3761009f7e1 100644 (file)
@@ -28,6 +28,7 @@
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
 
+#include "drm_legacy.h"
 static int
 nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
@@ -281,7 +282,7 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
        struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
 
        if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
-               return -EINVAL;
+               return drm_legacy_mmap(filp, vma);
 
        return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
 }
index fe48f229043e33720c26bcdd40949d7a8fd41b00..a46f73737994aba3f603aea4dde15ff916a91109 100644 (file)
@@ -394,10 +394,9 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        return r;
 }
 
-static int radeon_mode_mmap(struct drm_file *filp,
-                           struct drm_device *dev,
-                           uint32_t handle, bool dumb,
-                           uint64_t *offset_p)
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+                         struct drm_device *dev,
+                         uint32_t handle, uint64_t *offset_p)
 {
        struct drm_gem_object *gobj;
        struct radeon_bo *robj;
@@ -406,14 +405,6 @@ static int radeon_mode_mmap(struct drm_file *filp,
        if (gobj == NULL) {
                return -ENOENT;
        }
-
-       /*
-        * We don't allow dumb mmaps on objects created using another
-        * interface.
-        */
-       WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
-               "Illegal dumb map of GPU buffer.\n");
-
        robj = gem_to_radeon_bo(gobj);
        if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
                drm_gem_object_unreference_unlocked(gobj);
@@ -424,20 +415,12 @@ static int radeon_mode_mmap(struct drm_file *filp,
        return 0;
 }
 
-int radeon_mode_dumb_mmap(struct drm_file *filp,
-                         struct drm_device *dev,
-                         uint32_t handle, uint64_t *offset_p)
-{
-       return radeon_mode_mmap(filp, dev, handle, true, offset_p);
-}
-
 int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *filp)
 {
        struct drm_radeon_gem_mmap *args = data;
 
-       return radeon_mode_mmap(filp, dev, args->handle, false,
-                               &args->addr_ptr);
+       return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
 }
 
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -763,7 +746,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
                return -ENOMEM;
 
        r = drm_gem_handle_create(file_priv, gobj, &handle);
-       gobj->dumb = true;
        /* drop reference from allocate - handle holds it now */
        drm_gem_object_unreference_unlocked(gobj);
        if (r) {
index 065d02068ec3dc14f576c139e31a0b4bfef1580b..242fd8b1b221d9c49459b67d775aaf3c57fea45f 100644 (file)
@@ -28,6 +28,8 @@
 #include "cikd.h"
 #include "cik_reg.h"
 #include "radeon_kfd.h"
+#include "radeon_ucode.h"
+#include <linux/firmware.h>
 
 #define CIK_PIPE_PER_MEC       (4)
 
@@ -49,6 +51,7 @@ static uint64_t get_vmem_size(struct kgd_dev *kgd);
 static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
 
 static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
 
 /*
  * Register access functions
@@ -91,6 +94,7 @@ static const struct kfd2kgd_calls kfd2kgd = {
        .hqd_load = kgd_hqd_load,
        .hqd_is_occupies = kgd_hqd_is_occupies,
        .hqd_destroy = kgd_hqd_destroy,
+       .get_fw_version = get_fw_version
 };
 
 static const struct kgd2kfd_calls *kgd2kfd;
@@ -561,3 +565,52 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
        release_queue(kgd);
        return 0;
 }
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+       struct radeon_device *rdev = (struct radeon_device *) kgd;
+       const union radeon_firmware_header *hdr;
+
+       BUG_ON(kgd == NULL || rdev->mec_fw == NULL);
+
+       switch (type) {
+       case KGD_ENGINE_PFP:
+               hdr = (const union radeon_firmware_header *) rdev->pfp_fw->data;
+               break;
+
+       case KGD_ENGINE_ME:
+               hdr = (const union radeon_firmware_header *) rdev->me_fw->data;
+               break;
+
+       case KGD_ENGINE_CE:
+               hdr = (const union radeon_firmware_header *) rdev->ce_fw->data;
+               break;
+
+       case KGD_ENGINE_MEC1:
+               hdr = (const union radeon_firmware_header *) rdev->mec_fw->data;
+               break;
+
+       case KGD_ENGINE_MEC2:
+               hdr = (const union radeon_firmware_header *)
+                                                       rdev->mec2_fw->data;
+               break;
+
+       case KGD_ENGINE_RLC:
+               hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
+               break;
+
+       case KGD_ENGINE_SDMA:
+               hdr = (const union radeon_firmware_header *)
+                                                       rdev->sdma_fw->data;
+               break;
+
+       default:
+               return 0;
+       }
+
+       if (hdr == NULL)
+               return 0;
+
+       /* Only 12 bit in use*/
+       return hdr->common.ucode_version;
+}
index 7d68223eb4692a026fbad21b68bbe9362ee6d87a..86fc56434b2875435aaf9b7dab85fdac3e39e7e9 100644 (file)
@@ -529,9 +529,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
                        u32 current_domain =
                                radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
 
-                       WARN_ONCE(bo->gem_base.dumb,
-                                 "GPU use of dumb buffer is illegal.\n");
-
                        /* Check if this buffer will be moved and don't move it
                         * if we have moved too many buffers for this IB already.
                         *
index 3367960286a6f162455e0363a54dace2c6882883..978993fa3a360ef426b6dc48a3c287c3eda0cf48 100644 (file)
@@ -168,7 +168,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
                                 const struct tegra_dc_window *window)
 {
        unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
-       unsigned long value;
+       unsigned long value, flags;
        bool yuv, planar;
 
        /*
@@ -181,6 +181,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
        else
                bpp = planar ? 1 : 2;
 
+       spin_lock_irqsave(&dc->lock, flags);
+
        value = WINDOW_A_SELECT << index;
        tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
 
@@ -273,6 +275,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
 
                case TEGRA_BO_TILING_MODE_BLOCK:
                        DRM_ERROR("hardware doesn't support block linear mode\n");
+                       spin_unlock_irqrestore(&dc->lock, flags);
                        return -EINVAL;
                }
 
@@ -331,6 +334,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
 
        tegra_dc_window_commit(dc, index);
 
+       spin_unlock_irqrestore(&dc->lock, flags);
+
        return 0;
 }
 
@@ -338,11 +343,14 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
 {
        struct tegra_dc *dc = to_tegra_dc(plane->crtc);
        struct tegra_plane *p = to_tegra_plane(plane);
+       unsigned long flags;
        u32 value;
 
        if (!plane->crtc)
                return 0;
 
+       spin_lock_irqsave(&dc->lock, flags);
+
        value = WINDOW_A_SELECT << p->index;
        tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
 
@@ -352,6 +360,8 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
 
        tegra_dc_window_commit(dc, p->index);
 
+       spin_unlock_irqrestore(&dc->lock, flags);
+
        return 0;
 }
 
@@ -699,14 +709,16 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
        struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
        unsigned int h_offset = 0, v_offset = 0;
        struct tegra_bo_tiling tiling;
+       unsigned long value, flags;
        unsigned int format, swap;
-       unsigned long value;
        int err;
 
        err = tegra_fb_get_tiling(fb, &tiling);
        if (err < 0)
                return err;
 
+       spin_lock_irqsave(&dc->lock, flags);
+
        tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
 
        value = fb->offsets[0] + y * fb->pitches[0] +
@@ -752,6 +764,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
 
                case TEGRA_BO_TILING_MODE_BLOCK:
                        DRM_ERROR("hardware doesn't support block linear mode\n");
+                       spin_unlock_irqrestore(&dc->lock, flags);
                        return -EINVAL;
                }
 
@@ -778,6 +791,8 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
        tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
        tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
 
+       spin_unlock_irqrestore(&dc->lock, flags);
+
        return 0;
 }
 
@@ -814,23 +829,32 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
        unsigned long flags, base;
        struct tegra_bo *bo;
 
-       if (!dc->event)
+       spin_lock_irqsave(&drm->event_lock, flags);
+
+       if (!dc->event) {
+               spin_unlock_irqrestore(&drm->event_lock, flags);
                return;
+       }
 
        bo = tegra_fb_get_plane(crtc->primary->fb, 0);
 
+       spin_lock_irqsave(&dc->lock, flags);
+
        /* check if new start address has been latched */
+       tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
        tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
        base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
        tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
 
+       spin_unlock_irqrestore(&dc->lock, flags);
+
        if (base == bo->paddr + crtc->primary->fb->offsets[0]) {
-               spin_lock_irqsave(&drm->event_lock, flags);
-               drm_send_vblank_event(drm, dc->pipe, dc->event);
-               drm_vblank_put(drm, dc->pipe);
+               drm_crtc_send_vblank_event(crtc, dc->event);
+               drm_crtc_vblank_put(crtc);
                dc->event = NULL;
-               spin_unlock_irqrestore(&drm->event_lock, flags);
        }
+
+       spin_unlock_irqrestore(&drm->event_lock, flags);
 }
 
 void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
@@ -843,7 +867,7 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
 
        if (dc->event && dc->event->base.file_priv == file) {
                dc->event->base.destroy(&dc->event->base);
-               drm_vblank_put(drm, dc->pipe);
+               drm_crtc_vblank_put(crtc);
                dc->event = NULL;
        }
 
@@ -853,16 +877,16 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
 static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                              struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
 {
+       unsigned int pipe = drm_crtc_index(crtc);
        struct tegra_dc *dc = to_tegra_dc(crtc);
-       struct drm_device *drm = crtc->dev;
 
        if (dc->event)
                return -EBUSY;
 
        if (event) {
-               event->pipe = dc->pipe;
+               event->pipe = pipe;
                dc->event = event;
-               drm_vblank_get(drm, dc->pipe);
+               drm_crtc_vblank_get(crtc);
        }
 
        tegra_dc_set_base(dc, 0, 0, fb);
@@ -1127,7 +1151,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
                /*
                dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
                */
-               drm_handle_vblank(dc->base.dev, dc->pipe);
+               drm_crtc_handle_vblank(&dc->base);
                tegra_dc_finish_page_flip(dc);
        }
 
index e549afeece1ff12c899afc9f21dd52ff5e3e64ff..d4f827593dfa2041a386dba733f9436cbed1d111 100644 (file)
@@ -694,24 +694,28 @@ static const struct file_operations tegra_drm_fops = {
        .llseek = noop_llseek,
 };
 
-static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
+                                            unsigned int pipe)
 {
        struct drm_crtc *crtc;
 
        list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
-               struct tegra_dc *dc = to_tegra_dc(crtc);
-
-               if (dc->pipe == pipe)
+               if (pipe == drm_crtc_index(crtc))
                        return crtc;
        }
 
        return NULL;
 }
 
-static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
 {
+       struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+
+       if (!crtc)
+               return 0;
+
        /* TODO: implement real hardware counter using syncpoints */
-       return drm_vblank_count(dev, crtc);
+       return drm_crtc_vblank_count(crtc);
 }
 
 static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
index da32086cbeaf28bbe0c0528eeeac4c0bb9fcb326..8777b7f757916a704ceaec670c44d34a0aee813d 100644 (file)
@@ -216,32 +216,58 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
        }
 }
 
-static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
-                             size_t size)
+static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
 {
+       struct scatterlist *s;
+       struct sg_table *sgt;
+       unsigned int i;
+
        bo->pages = drm_gem_get_pages(&bo->gem);
        if (IS_ERR(bo->pages))
                return PTR_ERR(bo->pages);
 
-       bo->num_pages = size >> PAGE_SHIFT;
-
-       bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
-       if (IS_ERR(bo->sgt)) {
-               drm_gem_put_pages(&bo->gem, bo->pages, false, false);
-               return PTR_ERR(bo->sgt);
+       bo->num_pages = bo->gem.size >> PAGE_SHIFT;
+
+       sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+       if (IS_ERR(sgt))
+               goto put_pages;
+
+       /*
+        * Fake up the SG table so that dma_map_sg() can be used to flush the
+        * pages associated with it. Note that this relies on the fact that
+        * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
+        * only cache maintenance.
+        *
+        * TODO: Replace this by drm_clflash_sg() once it can be implemented
+        * without relying on symbols that are not exported.
+        */
+       for_each_sg(sgt->sgl, s, sgt->nents, i)
+               sg_dma_address(s) = sg_phys(s);
+
+       if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
+               sgt = ERR_PTR(-ENOMEM);
+               goto release_sgt;
        }
 
+       bo->sgt = sgt;
+
        return 0;
+
+release_sgt:
+       sg_free_table(sgt);
+       kfree(sgt);
+put_pages:
+       drm_gem_put_pages(&bo->gem, bo->pages, false, false);
+       return PTR_ERR(sgt);
 }
 
-static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
-                         size_t size)
+static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
 {
        struct tegra_drm *tegra = drm->dev_private;
        int err;
 
        if (tegra->domain) {
-               err = tegra_bo_get_pages(drm, bo, size);
+               err = tegra_bo_get_pages(drm, bo);
                if (err < 0)
                        return err;
 
@@ -251,6 +277,8 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
                        return err;
                }
        } else {
+               size_t size = bo->gem.size;
+
                bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
                                                   GFP_KERNEL | __GFP_NOWARN);
                if (!bo->vaddr) {
@@ -274,7 +302,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
        if (IS_ERR(bo))
                return bo;
 
-       err = tegra_bo_alloc(drm, bo, size);
+       err = tegra_bo_alloc(drm, bo);
        if (err < 0)
                goto release;
 
index c1351d9fb35bbc8d38ddc96ee9294f2b2614bf11..31e8308ba8990bffbaa6217aafa2bc0e72b62939 100644 (file)
@@ -753,6 +753,7 @@ config I2C_SH7760
 
 config I2C_SH_MOBILE
        tristate "SuperH Mobile I2C Controller"
+       depends on HAS_DMA
        depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
        help
          If you say yes to this option, support will be included for the
@@ -1072,4 +1073,15 @@ config SCx200_ACB
          This support is also available as a module.  If so, the module
          will be called scx200_acb.
 
+config I2C_OPAL
+       tristate "IBM OPAL I2C driver"
+       depends on PPC_POWERNV
+       default y
+       help
+         This exposes the PowerNV platform i2c busses to the linux i2c layer,
+         the driver is based on the OPAL interfaces.
+
+         This driver can also be built as a module. If so, the module will be
+         called as i2c-opal.
+
 endmenu
index 5e6c8223719e2587969f9ecb806409be736d674b..56388f658d2f2567cbcf4b38433212c94d0d0faf 100644 (file)
@@ -102,6 +102,7 @@ obj-$(CONFIG_I2C_ACORN)             += i2c-acorn.o
 obj-$(CONFIG_I2C_BCM_KONA)     += i2c-bcm-kona.o
 obj-$(CONFIG_I2C_CROS_EC_TUNNEL)       += i2c-cros-ec-tunnel.o
 obj-$(CONFIG_I2C_ELEKTOR)      += i2c-elektor.o
+obj-$(CONFIG_I2C_OPAL)         += i2c-opal.o
 obj-$(CONFIG_I2C_PCA_ISA)      += i2c-pca-isa.o
 obj-$(CONFIG_I2C_SIBYTE)       += i2c-sibyte.o
 obj-$(CONFIG_SCx200_ACB)       += scx200_acb.o
index 373f6d4e4080e1f3c56c1c32ffcb0e6d999fe716..30059c1df2a3b57ea559fc984b559c79e2f06ce6 100644 (file)
 #define MV64XXX_I2C_BAUD_DIV_N(val)                    (val & 0x7)
 #define MV64XXX_I2C_BAUD_DIV_M(val)                    ((val & 0xf) << 3)
 
-#define        MV64XXX_I2C_REG_CONTROL_ACK                     0x00000004
-#define        MV64XXX_I2C_REG_CONTROL_IFLG                    0x00000008
-#define        MV64XXX_I2C_REG_CONTROL_STOP                    0x00000010
-#define        MV64XXX_I2C_REG_CONTROL_START                   0x00000020
-#define        MV64XXX_I2C_REG_CONTROL_TWSIEN                  0x00000040
-#define        MV64XXX_I2C_REG_CONTROL_INTEN                   0x00000080
+#define        MV64XXX_I2C_REG_CONTROL_ACK                     BIT(2)
+#define        MV64XXX_I2C_REG_CONTROL_IFLG                    BIT(3)
+#define        MV64XXX_I2C_REG_CONTROL_STOP                    BIT(4)
+#define        MV64XXX_I2C_REG_CONTROL_START                   BIT(5)
+#define        MV64XXX_I2C_REG_CONTROL_TWSIEN                  BIT(6)
+#define        MV64XXX_I2C_REG_CONTROL_INTEN                   BIT(7)
 
 /* Ctlr status values */
 #define        MV64XXX_I2C_STATUS_BUS_ERR                      0x00
 #define        MV64XXX_I2C_REG_BRIDGE_TIMING                   0xe0
 
 /* Bridge Control values */
-#define        MV64XXX_I2C_BRIDGE_CONTROL_WR                   0x00000001
-#define        MV64XXX_I2C_BRIDGE_CONTROL_RD                   0x00000002
+#define        MV64XXX_I2C_BRIDGE_CONTROL_WR                   BIT(0)
+#define        MV64XXX_I2C_BRIDGE_CONTROL_RD                   BIT(1)
 #define        MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT           2
-#define        MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT             0x00001000
+#define        MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT             BIT(12)
 #define        MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT        13
 #define        MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT        16
-#define        MV64XXX_I2C_BRIDGE_CONTROL_ENABLE               0x00080000
+#define        MV64XXX_I2C_BRIDGE_CONTROL_ENABLE               BIT(19)
+#define        MV64XXX_I2C_BRIDGE_CONTROL_REPEATED_START       BIT(20)
 
 /* Bridge Status values */
-#define        MV64XXX_I2C_BRIDGE_STATUS_ERROR                 0x00000001
-#define        MV64XXX_I2C_STATUS_OFFLOAD_ERROR                0xf0000001
-#define        MV64XXX_I2C_STATUS_OFFLOAD_OK                   0xf0000000
-
+#define        MV64XXX_I2C_BRIDGE_STATUS_ERROR                 BIT(0)
 
 /* Driver states */
 enum {
@@ -99,14 +97,12 @@ enum {
        MV64XXX_I2C_ACTION_INVALID,
        MV64XXX_I2C_ACTION_CONTINUE,
        MV64XXX_I2C_ACTION_SEND_RESTART,
-       MV64XXX_I2C_ACTION_OFFLOAD_RESTART,
        MV64XXX_I2C_ACTION_SEND_ADDR_1,
        MV64XXX_I2C_ACTION_SEND_ADDR_2,
        MV64XXX_I2C_ACTION_SEND_DATA,
        MV64XXX_I2C_ACTION_RCV_DATA,
        MV64XXX_I2C_ACTION_RCV_DATA_STOP,
        MV64XXX_I2C_ACTION_SEND_STOP,
-       MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP,
 };
 
 struct mv64xxx_i2c_regs {
@@ -193,75 +189,6 @@ mv64xxx_i2c_prepare_for_io(struct mv64xxx_i2c_data *drv_data,
        }
 }
 
-static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
-{
-       unsigned long data_reg_hi = 0;
-       unsigned long data_reg_lo = 0;
-       unsigned long ctrl_reg;
-       struct i2c_msg *msg = drv_data->msgs;
-
-       if (!drv_data->offload_enabled)
-               return -EOPNOTSUPP;
-
-       /* Only regular transactions can be offloaded */
-       if ((msg->flags & ~(I2C_M_TEN | I2C_M_RD)) != 0)
-               return -EINVAL;
-
-       /* Only 1-8 byte transfers can be offloaded */
-       if (msg->len < 1 || msg->len > 8)
-               return -EINVAL;
-
-       /* Build transaction */
-       ctrl_reg = MV64XXX_I2C_BRIDGE_CONTROL_ENABLE |
-                  (msg->addr << MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT);
-
-       if ((msg->flags & I2C_M_TEN) != 0)
-               ctrl_reg |=  MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT;
-
-       if ((msg->flags & I2C_M_RD) == 0) {
-               u8 local_buf[8] = { 0 };
-
-               memcpy(local_buf, msg->buf, msg->len);
-               data_reg_lo = cpu_to_le32(*((u32 *)local_buf));
-               data_reg_hi = cpu_to_le32(*((u32 *)(local_buf+4)));
-
-               ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
-                   (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT;
-
-               writel(data_reg_lo,
-                       drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
-               writel(data_reg_hi,
-                       drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
-
-       } else {
-               ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_RD |
-                   (msg->len - 1) << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT;
-       }
-
-       /* Execute transaction */
-       writel(ctrl_reg, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
-
-       return 0;
-}
-
-static void
-mv64xxx_i2c_update_offload_data(struct mv64xxx_i2c_data *drv_data)
-{
-       struct i2c_msg *msg = drv_data->msg;
-
-       if (msg->flags & I2C_M_RD) {
-               u32 data_reg_lo = readl(drv_data->reg_base +
-                               MV64XXX_I2C_REG_RX_DATA_LO);
-               u32 data_reg_hi = readl(drv_data->reg_base +
-                               MV64XXX_I2C_REG_RX_DATA_HI);
-               u8 local_buf[8] = { 0 };
-
-               *((u32 *)local_buf) = le32_to_cpu(data_reg_lo);
-               *((u32 *)(local_buf+4)) = le32_to_cpu(data_reg_hi);
-               memcpy(msg->buf, local_buf, msg->len);
-       }
-
-}
 /*
  *****************************************************************************
  *
@@ -389,16 +316,6 @@ mv64xxx_i2c_fsm(struct mv64xxx_i2c_data *drv_data, u32 status)
                drv_data->rc = -ENXIO;
                break;
 
-       case MV64XXX_I2C_STATUS_OFFLOAD_OK:
-               if (drv_data->send_stop || drv_data->aborting) {
-                       drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP;
-                       drv_data->state = MV64XXX_I2C_STATE_IDLE;
-               } else {
-                       drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_RESTART;
-                       drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_RESTART;
-               }
-               break;
-
        default:
                dev_err(&drv_data->adapter.dev,
                        "mv64xxx_i2c_fsm: Ctlr Error -- state: 0x%x, "
@@ -419,25 +336,15 @@ static void mv64xxx_i2c_send_start(struct mv64xxx_i2c_data *drv_data)
        drv_data->aborting = 0;
        drv_data->rc = 0;
 
-       /* Can we offload this msg ? */
-       if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
-               /* No, switch to standard path */
-               mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
-               writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
-                       drv_data->reg_base + drv_data->reg_offsets.control);
-       }
+       mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
+       writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
+              drv_data->reg_base + drv_data->reg_offsets.control);
 }
 
 static void
 mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
 {
        switch(drv_data->action) {
-       case MV64XXX_I2C_ACTION_OFFLOAD_RESTART:
-               mv64xxx_i2c_update_offload_data(drv_data);
-               writel(0, drv_data->reg_base +  MV64XXX_I2C_REG_BRIDGE_CONTROL);
-               writel(0, drv_data->reg_base +
-                       MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
-               /* FALLTHRU */
        case MV64XXX_I2C_ACTION_SEND_RESTART:
                /* We should only get here if we have further messages */
                BUG_ON(drv_data->num_msgs == 0);
@@ -518,16 +425,71 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
                drv_data->block = 0;
                wake_up(&drv_data->waitq);
                break;
+       }
+}
 
-       case MV64XXX_I2C_ACTION_OFFLOAD_SEND_STOP:
-               mv64xxx_i2c_update_offload_data(drv_data);
-               writel(0, drv_data->reg_base +  MV64XXX_I2C_REG_BRIDGE_CONTROL);
-               writel(0, drv_data->reg_base +
-                       MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
-               drv_data->block = 0;
-               wake_up(&drv_data->waitq);
-               break;
+static void
+mv64xxx_i2c_read_offload_rx_data(struct mv64xxx_i2c_data *drv_data,
+                                struct i2c_msg *msg)
+{
+       u32 buf[2];
+
+       buf[0] = readl(drv_data->reg_base + MV64XXX_I2C_REG_RX_DATA_LO);
+       buf[1] = readl(drv_data->reg_base + MV64XXX_I2C_REG_RX_DATA_HI);
+
+       memcpy(msg->buf, buf, msg->len);
+}
+
+static int
+mv64xxx_i2c_intr_offload(struct mv64xxx_i2c_data *drv_data)
+{
+       u32 cause, status;
+
+       cause = readl(drv_data->reg_base +
+                     MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
+       if (!cause)
+               return IRQ_NONE;
+
+       status = readl(drv_data->reg_base +
+                      MV64XXX_I2C_REG_BRIDGE_STATUS);
+
+       if (status & MV64XXX_I2C_BRIDGE_STATUS_ERROR) {
+               drv_data->rc = -EIO;
+               goto out;
+       }
+
+       drv_data->rc = 0;
+
+       /*
+        * Transaction is a one message read transaction, read data
+        * for this message.
+        */
+       if (drv_data->num_msgs == 1 && drv_data->msgs[0].flags & I2C_M_RD) {
+               mv64xxx_i2c_read_offload_rx_data(drv_data, drv_data->msgs);
+               drv_data->msgs++;
+               drv_data->num_msgs--;
+       }
+       /*
+        * Transaction is a two messages write/read transaction, read
+        * data for the second (read) message.
+        */
+       else if (drv_data->num_msgs == 2 &&
+                !(drv_data->msgs[0].flags & I2C_M_RD) &&
+                drv_data->msgs[1].flags & I2C_M_RD) {
+               mv64xxx_i2c_read_offload_rx_data(drv_data, drv_data->msgs + 1);
+               drv_data->msgs += 2;
+               drv_data->num_msgs -= 2;
        }
+
+out:
+       writel(0, drv_data->reg_base +  MV64XXX_I2C_REG_BRIDGE_CONTROL);
+       writel(0, drv_data->reg_base +
+              MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE);
+       drv_data->block = 0;
+
+       wake_up(&drv_data->waitq);
+
+       return IRQ_HANDLED;
 }
 
 static irqreturn_t
@@ -540,20 +502,9 @@ mv64xxx_i2c_intr(int irq, void *dev_id)
 
        spin_lock_irqsave(&drv_data->lock, flags);
 
-       if (drv_data->offload_enabled) {
-               while (readl(drv_data->reg_base +
-                               MV64XXX_I2C_REG_BRIDGE_INTR_CAUSE)) {
-                       int reg_status = readl(drv_data->reg_base +
-                                       MV64XXX_I2C_REG_BRIDGE_STATUS);
-                       if (reg_status & MV64XXX_I2C_BRIDGE_STATUS_ERROR)
-                               status = MV64XXX_I2C_STATUS_OFFLOAD_ERROR;
-                       else
-                               status = MV64XXX_I2C_STATUS_OFFLOAD_OK;
-                       mv64xxx_i2c_fsm(drv_data, status);
-                       mv64xxx_i2c_do_action(drv_data);
-                       rc = IRQ_HANDLED;
-               }
-       }
+       if (drv_data->offload_enabled)
+               rc = mv64xxx_i2c_intr_offload(drv_data);
+
        while (readl(drv_data->reg_base + drv_data->reg_offsets.control) &
                                                MV64XXX_I2C_REG_CONTROL_IFLG) {
                status = readl(drv_data->reg_base + drv_data->reg_offsets.status);
@@ -635,6 +586,117 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
        return drv_data->rc;
 }
 
+/*
+ * Load the bytes of the first (write) message into the Bridge
+ * TX_DATA_LO/TX_DATA_HI registers ahead of an offloaded transfer.
+ *
+ * NOTE(review): for msg->len < 8 the tail of buf[] is uninitialized,
+ * so indeterminate bytes end up in the TX registers.  The bridge is
+ * programmed to transmit only msg->len bytes, so this looks harmless,
+ * but confirm against the hardware documentation.
+ */
+static void
+mv64xxx_i2c_prepare_tx(struct mv64xxx_i2c_data *drv_data)
+{
+       struct i2c_msg *msg = drv_data->msgs;
+       u32 buf[2];
+
+       memcpy(buf, msg->buf, msg->len);
+
+       writel(buf[0], drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_LO);
+       writel(buf[1], drv_data->reg_base + MV64XXX_I2C_REG_TX_DATA_HI);
+}
+
+/*
+ * Run the current transaction through the hardware Bridge ("offload")
+ * engine instead of driving it byte-by-byte through the FSM.  The
+ * transaction shape (single read, single write, or write followed by
+ * read, each 1..8 bytes) has already been validated by
+ * mv64xxx_i2c_can_offload().  Sleeps until the completion interrupt
+ * path (mv64xxx_i2c_intr_offload()) clears drv_data->block and wakes
+ * us, then returns drv_data->rc.
+ *
+ * NOTE(review): the TX/RX size fields are programmed as len - 1;
+ * presumably the hardware encodes a 1..8 byte transfer as 0..7 —
+ * confirm against the datasheet.
+ */
+static int
+mv64xxx_i2c_offload_xfer(struct mv64xxx_i2c_data *drv_data)
+{
+       struct i2c_msg *msgs = drv_data->msgs;
+       int num = drv_data->num_msgs;
+       unsigned long ctrl_reg;
+       unsigned long flags;
+
+       spin_lock_irqsave(&drv_data->lock, flags);
+
+       /* Build transaction */
+       ctrl_reg = MV64XXX_I2C_BRIDGE_CONTROL_ENABLE |
+               (msgs[0].addr << MV64XXX_I2C_BRIDGE_CONTROL_ADDR_SHIFT);
+
+       if (msgs[0].flags & I2C_M_TEN)
+               ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_ADDR_EXT;
+
+       /* Single write message transaction */
+       if (num == 1 && !(msgs[0].flags & I2C_M_RD)) {
+               size_t len = msgs[0].len - 1;
+
+               ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_WR |
+                       (len << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT);
+               mv64xxx_i2c_prepare_tx(drv_data);
+       }
+       /* Single read message transaction */
+       else if (num == 1 && msgs[0].flags & I2C_M_RD) {
+               size_t len = msgs[0].len - 1;
+
+               ctrl_reg |= MV64XXX_I2C_BRIDGE_CONTROL_RD |
+                       (len << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT);
+       }
+       /*
+        * Transaction with one write and one read message. This is
+        * guaranteed by the mv64xxx_i2c_can_offload() checks.
+        */
+       else if (num == 2) {
+               size_t lentx = msgs[0].len - 1;
+               size_t lenrx = msgs[1].len - 1;
+
+               ctrl_reg |=
+                       MV64XXX_I2C_BRIDGE_CONTROL_RD |
+                       MV64XXX_I2C_BRIDGE_CONTROL_WR |
+                       (lentx << MV64XXX_I2C_BRIDGE_CONTROL_TX_SIZE_SHIFT) |
+                       (lenrx << MV64XXX_I2C_BRIDGE_CONTROL_RX_SIZE_SHIFT) |
+                       MV64XXX_I2C_BRIDGE_CONTROL_REPEATED_START;
+               mv64xxx_i2c_prepare_tx(drv_data);
+       }
+
+       /* Execute transaction */
+       drv_data->block = 1;
+       writel(ctrl_reg, drv_data->reg_base + MV64XXX_I2C_REG_BRIDGE_CONTROL);
+       spin_unlock_irqrestore(&drv_data->lock, flags);
+
+       mv64xxx_i2c_wait_for_completion(drv_data);
+
+       return drv_data->rc;
+}
+
+/*
+ * The Bridge data path is two 32-bit registers (RX/TX_DATA_LO/HI), so
+ * an offloadable message must carry between 1 and 8 bytes.
+ */
+static bool
+mv64xxx_i2c_valid_offload_sz(struct i2c_msg *msg)
+{
+       return msg->len <= 8 && msg->len >= 1;
+}
+
+/*
+ * Decide whether the current transaction can be handed to the
+ * hardware Bridge ("offload") engine instead of being driven
+ * byte-by-byte by the FSM.  Only a single message, or a write
+ * followed by a read, each between 1 and 8 bytes, qualifies.
+ *
+ * Fix: the original had an unconditional "return false;" ahead of
+ * every check — leftover debug code that made the rest of the
+ * function unreachable and silently disabled offload even when
+ * drv_data->offload_enabled was set.  Removed.
+ */
+static bool
+mv64xxx_i2c_can_offload(struct mv64xxx_i2c_data *drv_data)
+{
+       struct i2c_msg *msgs = drv_data->msgs;
+       int num = drv_data->num_msgs;
+
+       if (!drv_data->offload_enabled)
+               return false;
+
+       /*
+        * We can offload a transaction consisting of a single
+        * message, as long as the message has a length between 1 and
+        * 8 bytes.
+        */
+       if (num == 1 && mv64xxx_i2c_valid_offload_sz(msgs))
+               return true;
+
+       /*
+        * We can offload a transaction consisting of two messages, if
+        * the first is a write and a second is a read, and both have
+        * a length between 1 and 8 bytes.
+        */
+       if (num == 2 &&
+           mv64xxx_i2c_valid_offload_sz(msgs) &&
+           mv64xxx_i2c_valid_offload_sz(msgs + 1) &&
+           !(msgs[0].flags & I2C_M_RD) &&
+           msgs[1].flags & I2C_M_RD)
+               return true;
+
+       return false;
+}
+
 /*
  *****************************************************************************
  *
@@ -658,7 +720,11 @@ mv64xxx_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
        drv_data->msgs = msgs;
        drv_data->num_msgs = num;
 
-       rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[0], num == 1);
+       if (mv64xxx_i2c_can_offload(drv_data))
+               rc = mv64xxx_i2c_offload_xfer(drv_data);
+       else
+               rc = mv64xxx_i2c_execute_msg(drv_data, &msgs[0], num == 1);
+
        if (rc < 0)
                ret = rc;
 
diff --git a/drivers/i2c/busses/i2c-opal.c b/drivers/i2c/busses/i2c-opal.c
new file mode 100644 (file)
index 0000000..16f90b1
--- /dev/null
@@ -0,0 +1,294 @@
+/*
+ * IBM OPAL I2C driver
+ * Copyright (C) 2014 IBM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <asm/firmware.h>
+#include <asm/opal.h>
+
+/* Map an OPAL firmware return code to a negative Linux errno. */
+static int i2c_opal_translate_error(int rc)
+{
+       switch (rc) {
+       case OPAL_NO_MEM:
+               return -ENOMEM;
+       case OPAL_PARAMETER:
+               return -EINVAL;
+       case OPAL_I2C_ARBT_LOST:
+               return -EAGAIN;
+       case OPAL_I2C_TIMEOUT:
+               return -ETIMEDOUT;
+       case OPAL_I2C_NACK_RCVD:
+               return -ENXIO;
+       case OPAL_I2C_STOP_ERR:
+               return -EBUSY;
+       default:
+               return -EIO;
+       }
+}
+
+/*
+ * Submit one opal_i2c_request() to firmware for the given bus and
+ * wait (via the OPAL async-token machinery) for the completion
+ * message.  The final OPAL status is carried in msg.params[1].
+ * Returns 0 on success or a negative errno translated from the OPAL
+ * return code; the async token is released on every path.
+ */
+static int i2c_opal_send_request(u32 bus_id, struct opal_i2c_request *req)
+{
+       struct opal_msg msg;
+       int token, rc;
+
+       token = opal_async_get_token_interruptible();
+       if (token < 0) {
+               if (token != -ERESTARTSYS)
+                       pr_err("Failed to get the async token\n");
+
+               return token;
+       }
+
+       rc = opal_i2c_request(token, bus_id, req);
+       if (rc != OPAL_ASYNC_COMPLETION) {
+               rc = i2c_opal_translate_error(rc);
+               goto exit;
+       }
+
+       rc = opal_async_wait_response(token, &msg);
+       if (rc)
+               goto exit;
+
+       rc = be64_to_cpu(msg.params[1]);
+       if (rc != OPAL_SUCCESS) {
+               rc = i2c_opal_translate_error(rc);
+               goto exit;
+       }
+
+exit:
+       opal_async_release_token(token);
+       return rc;
+}
+
+/*
+ * i2c_algorithm .master_xfer: hand the transaction to firmware as a
+ * single OPAL request.  Only a lone raw read/write, or an SMBus-style
+ * pair of a write of up to 4 subaddress bytes followed by a read from
+ * the same slave, can be expressed; anything else is refused with
+ * -EOPNOTSUPP.  Returns the number of messages transferred on
+ * success.
+ */
+static int i2c_opal_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
+                               int num)
+{
+       unsigned long opal_id = (unsigned long)adap->algo_data;
+       struct opal_i2c_request req;
+       int rc, i;
+
+       /* We only support fairly simple combinations here of one
+        * or two messages
+        */
+       memset(&req, 0, sizeof(req));
+       switch(num) {
+       case 0:
+               return 0;
+       case 1:
+               req.type = (msgs[0].flags & I2C_M_RD) ?
+                       OPAL_I2C_RAW_READ : OPAL_I2C_RAW_WRITE;
+               req.addr = cpu_to_be16(msgs[0].addr);
+               req.size = cpu_to_be32(msgs[0].len);
+               req.buffer_ra = cpu_to_be64(__pa(msgs[0].buf));
+               break;
+       case 2:
+               /* For two messages, we basically support only simple
+                * smbus transactions of a write plus a read. We might
+                * want to allow also two writes but we'd have to bounce
+                * the data into a single buffer.
+                */
+               if ((msgs[0].flags & I2C_M_RD) || !(msgs[1].flags & I2C_M_RD))
+                       return -EOPNOTSUPP;
+               if (msgs[0].len > 4)
+                       return -EOPNOTSUPP;
+               if (msgs[0].addr != msgs[1].addr)
+                       return -EOPNOTSUPP;
+               req.type = OPAL_I2C_SM_READ;
+               req.addr = cpu_to_be16(msgs[0].addr);
+               req.subaddr_sz = msgs[0].len;
+               /* Pack the subaddress bytes big-endian-first, then swap */
+               for (i = 0; i < msgs[0].len; i++)
+                       req.subaddr = (req.subaddr << 8) | msgs[0].buf[i];
+               req.subaddr = cpu_to_be32(req.subaddr);
+               req.size = cpu_to_be32(msgs[1].len);
+               req.buffer_ra = cpu_to_be64(__pa(msgs[1].buf));
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+
+       rc = i2c_opal_send_request(opal_id, &req);
+       if (rc)
+               return rc;
+
+       return num;
+}
+
+/*
+ * i2c_algorithm .smbus_xfer: translate the SMBus operation into a
+ * single OPAL raw or subaddressed ("SM") request.  For word data,
+ * writes are staged into local[] (low byte first) before the request
+ * and reads are reassembled from local[] afterwards.  Buffers are
+ * passed to firmware by physical address (__pa()).
+ */
+static int i2c_opal_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+                              unsigned short flags, char read_write,
+                              u8 command, int size, union i2c_smbus_data *data)
+{
+       unsigned long opal_id = (unsigned long)adap->algo_data;
+       struct opal_i2c_request req;
+       u8 local[2];
+       int rc;
+
+       memset(&req, 0, sizeof(req));
+
+       req.addr = cpu_to_be16(addr);
+       switch (size) {
+       case I2C_SMBUS_BYTE:
+               req.buffer_ra = cpu_to_be64(__pa(&data->byte));
+               req.size = cpu_to_be32(1);
+               /* Fall through */
+       case I2C_SMBUS_QUICK:
+               req.type = (read_write == I2C_SMBUS_READ) ?
+                       OPAL_I2C_RAW_READ : OPAL_I2C_RAW_WRITE;
+               break;
+       case I2C_SMBUS_BYTE_DATA:
+               req.buffer_ra = cpu_to_be64(__pa(&data->byte));
+               req.size = cpu_to_be32(1);
+               req.subaddr = cpu_to_be32(command);
+               req.subaddr_sz = 1;
+               req.type = (read_write == I2C_SMBUS_READ) ?
+                       OPAL_I2C_SM_READ : OPAL_I2C_SM_WRITE;
+               break;
+       case I2C_SMBUS_WORD_DATA:
+               /* I2C_SMBUS_WRITE == 0: stage the word to send */
+               if (!read_write) {
+                       local[0] = data->word & 0xff;
+                       local[1] = (data->word >> 8) & 0xff;
+               }
+               req.buffer_ra = cpu_to_be64(__pa(local));
+               req.size = cpu_to_be32(2);
+               req.subaddr = cpu_to_be32(command);
+               req.subaddr_sz = 1;
+               req.type = (read_write == I2C_SMBUS_READ) ?
+                       OPAL_I2C_SM_READ : OPAL_I2C_SM_WRITE;
+               break;
+       case I2C_SMBUS_I2C_BLOCK_DATA:
+               req.buffer_ra = cpu_to_be64(__pa(&data->block[1]));
+               req.size = cpu_to_be32(data->block[0]);
+               req.subaddr = cpu_to_be32(command);
+               req.subaddr_sz = 1;
+               req.type = (read_write == I2C_SMBUS_READ) ?
+                       OPAL_I2C_SM_READ : OPAL_I2C_SM_WRITE;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       rc = i2c_opal_send_request(opal_id, &req);
+       /* Word reads come back in local[]; assemble little-endian */
+       if (!rc && read_write && size == I2C_SMBUS_WORD_DATA) {
+               data->word = ((u16)local[1]) << 8;
+               data->word |= local[0];
+       }
+
+       return rc;
+}
+
+/* Advertise the transfer types the OPAL request interface supports. */
+static u32 i2c_opal_func(struct i2c_adapter *adapter)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
+              I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
+              I2C_FUNC_SMBUS_I2C_BLOCK;
+}
+
+/* Algorithm hooks wired into each registered adapter. */
+static const struct i2c_algorithm i2c_opal_algo = {
+       .master_xfer    = i2c_opal_master_xfer,
+       .smbus_xfer     = i2c_opal_smbus_xfer,
+       .functionality  = i2c_opal_func,
+};
+
+/*
+ * Bind to an "ibm,opal-i2c" platform device: read the firmware bus id
+ * from the "ibm,opal-id" DT property, stash it in adapter->algo_data
+ * (as a cast integer, not a pointer) and register the adapter, named
+ * after "ibm,port-name" when that property exists.
+ */
+static int i2c_opal_probe(struct platform_device *pdev)
+{
+       struct i2c_adapter      *adapter;
+       const char              *pname;
+       u32                     opal_id;
+       int                     rc;
+
+       if (!pdev->dev.of_node)
+               return -ENODEV;
+
+       rc = of_property_read_u32(pdev->dev.of_node, "ibm,opal-id", &opal_id);
+       if (rc) {
+               dev_err(&pdev->dev, "Missing ibm,opal-id property !\n");
+               return -EIO;
+       }
+
+       adapter = devm_kzalloc(&pdev->dev, sizeof(*adapter), GFP_KERNEL);
+       if (!adapter)
+               return -ENOMEM;
+
+       adapter->algo = &i2c_opal_algo;
+       adapter->algo_data = (void *)(unsigned long)opal_id;
+       adapter->dev.parent = &pdev->dev;
+       adapter->dev.of_node = of_node_get(pdev->dev.of_node);
+       pname = of_get_property(pdev->dev.of_node, "ibm,port-name", NULL);
+       if (pname)
+               strlcpy(adapter->name, pname, sizeof(adapter->name));
+       else
+               strlcpy(adapter->name, "opal", sizeof(adapter->name));
+
+       platform_set_drvdata(pdev, adapter);
+       rc = i2c_add_adapter(adapter);
+       if (rc)
+               dev_err(&pdev->dev, "Failed to register the i2c adapter\n");
+
+       return rc;
+}
+
+/* Unregister the adapter registered by i2c_opal_probe(). */
+static int i2c_opal_remove(struct platform_device *pdev)
+{
+       struct i2c_adapter *adapter = platform_get_drvdata(pdev);
+
+       i2c_del_adapter(adapter);
+
+       return 0;
+}
+
+/* Device-tree match table: bind to "ibm,opal-i2c" nodes. */
+static const struct of_device_id i2c_opal_of_match[] = {
+       {
+               .compatible = "ibm,opal-i2c",
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(of, i2c_opal_of_match);
+
+/* Platform driver glue. */
+static struct platform_driver i2c_opal_driver = {
+       .probe  = i2c_opal_probe,
+       .remove = i2c_opal_remove,
+       .driver = {
+               .name           = "i2c-opal",
+               .of_match_table = i2c_opal_of_match,
+       },
+};
+
+/* Module load: only register on systems running OPAL firmware. */
+static int __init i2c_opal_init(void)
+{
+       if (!firmware_has_feature(FW_FEATURE_OPAL))
+               return -ENODEV;
+
+       return platform_driver_register(&i2c_opal_driver);
+}
+module_init(i2c_opal_init);
+
+/* Module unload: unregister the platform driver. */
+static void __exit i2c_opal_exit(void)
+{
+       /* Fix: dropped "return" on a void expression (C constraint violation). */
+       platform_driver_unregister(&i2c_opal_driver);
+}
+module_exit(i2c_opal_exit);
+
+MODULE_AUTHOR("Neelesh Gupta <neelegup@linux.vnet.ibm.com>");
+MODULE_DESCRIPTION("IBM OPAL I2C driver");
+MODULE_LICENSE("GPL");
index d7efaf44868b24d9348219c39426811240903a90..440d5dbc8b5f0c90ca3dd4341cb4a4e4858f791e 100644 (file)
@@ -140,6 +140,7 @@ struct sh_mobile_i2c_data {
        int sr;
        bool send_stop;
 
+       struct resource *res;
        struct dma_chan *dma_tx;
        struct dma_chan *dma_rx;
        struct scatterlist sg;
@@ -539,6 +540,42 @@ static void sh_mobile_i2c_dma_callback(void *data)
        iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
 }
 
+/*
+ * Request and configure a DMA slave channel ("tx" for DMA_MEM_TO_DEV,
+ * "rx" otherwise) targeting the ICDR data register at @port_addr with
+ * single-byte accesses.  Returns the configured channel, or an
+ * ERR_PTR on failure — in particular -EPROBE_DEFER is propagated so
+ * the caller (sh_mobile_i2c_xfer_dma()) can retry the request lazily
+ * on a later transfer.
+ */
+static struct dma_chan *sh_mobile_i2c_request_dma_chan(struct device *dev,
+                               enum dma_transfer_direction dir, dma_addr_t port_addr)
+{
+       struct dma_chan *chan;
+       struct dma_slave_config cfg;
+       char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
+       int ret;
+
+       chan = dma_request_slave_channel_reason(dev, chan_name);
+       if (IS_ERR(chan)) {
+               ret = PTR_ERR(chan);
+               dev_dbg(dev, "request_channel failed for %s (%d)\n", chan_name, ret);
+               return chan;
+       }
+
+       memset(&cfg, 0, sizeof(cfg));
+       cfg.direction = dir;
+       if (dir == DMA_MEM_TO_DEV) {
+               cfg.dst_addr = port_addr;
+               cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+       } else {
+               cfg.src_addr = port_addr;
+               cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+       }
+
+       ret = dmaengine_slave_config(chan, &cfg);
+       if (ret) {
+               dev_dbg(dev, "slave_config failed for %s (%d)\n", chan_name, ret);
+               dma_release_channel(chan);
+               return ERR_PTR(ret);
+       }
+
+       dev_dbg(dev, "got DMA channel for %s\n", chan_name);
+       return chan;
+}
+
 static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
 {
        bool read = pd->msg->flags & I2C_M_RD;
@@ -548,7 +585,16 @@ static void sh_mobile_i2c_xfer_dma(struct sh_mobile_i2c_data *pd)
        dma_addr_t dma_addr;
        dma_cookie_t cookie;
 
-       if (!chan)
+       if (PTR_ERR(chan) == -EPROBE_DEFER) {
+               if (read)
+                       chan = pd->dma_rx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_DEV_TO_MEM,
+                                                                          pd->res->start + ICDR);
+               else
+                       chan = pd->dma_tx = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_MEM_TO_DEV,
+                                                                          pd->res->start + ICDR);
+       }
+
+       if (IS_ERR(chan))
                return;
 
        dma_addr = dma_map_single(chan->device->dev, pd->msg->buf, pd->msg->len, dir);
@@ -747,56 +793,16 @@ static const struct of_device_id sh_mobile_i2c_dt_ids[] = {
 };
 MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids);
 
-static int sh_mobile_i2c_request_dma_chan(struct device *dev, enum dma_transfer_direction dir,
-                                         dma_addr_t port_addr, struct dma_chan **chan_ptr)
-{
-       struct dma_chan *chan;
-       struct dma_slave_config cfg;
-       char *chan_name = dir == DMA_MEM_TO_DEV ? "tx" : "rx";
-       int ret;
-
-       *chan_ptr = NULL;
-
-       chan = dma_request_slave_channel_reason(dev, chan_name);
-       if (IS_ERR(chan)) {
-               ret = PTR_ERR(chan);
-               dev_dbg(dev, "request_channel failed for %s (%d)\n", chan_name, ret);
-               return ret;
-       }
-
-       memset(&cfg, 0, sizeof(cfg));
-       cfg.direction = dir;
-       if (dir == DMA_MEM_TO_DEV) {
-               cfg.dst_addr = port_addr;
-               cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
-       } else {
-               cfg.src_addr = port_addr;
-               cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
-       }
-
-       ret = dmaengine_slave_config(chan, &cfg);
-       if (ret) {
-               dev_dbg(dev, "slave_config failed for %s (%d)\n", chan_name, ret);
-               dma_release_channel(chan);
-               return ret;
-       }
-
-       *chan_ptr = chan;
-
-       dev_dbg(dev, "got DMA channel for %s\n", chan_name);
-       return 0;
-}
-
 static void sh_mobile_i2c_release_dma(struct sh_mobile_i2c_data *pd)
 {
-       if (pd->dma_tx) {
+       if (!IS_ERR(pd->dma_tx)) {
                dma_release_channel(pd->dma_tx);
-               pd->dma_tx = NULL;
+               pd->dma_tx = ERR_PTR(-EPROBE_DEFER);
        }
 
-       if (pd->dma_rx) {
+       if (!IS_ERR(pd->dma_rx)) {
                dma_release_channel(pd->dma_rx);
-               pd->dma_rx = NULL;
+               pd->dma_rx = ERR_PTR(-EPROBE_DEFER);
        }
 }
 
@@ -849,6 +855,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
 
        res = platform_get_resource(dev, IORESOURCE_MEM, 0);
 
+       pd->res = res;
        pd->reg = devm_ioremap_resource(&dev->dev, res);
        if (IS_ERR(pd->reg))
                return PTR_ERR(pd->reg);
@@ -889,17 +896,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
        /* Init DMA */
        sg_init_table(&pd->sg, 1);
        pd->dma_direction = DMA_NONE;
-       ret = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_DEV_TO_MEM,
-                                            res->start + ICDR, &pd->dma_rx);
-       if (ret == -EPROBE_DEFER)
-               return ret;
-
-       ret = sh_mobile_i2c_request_dma_chan(pd->dev, DMA_MEM_TO_DEV,
-                                            res->start + ICDR, &pd->dma_tx);
-       if (ret == -EPROBE_DEFER) {
-               sh_mobile_i2c_release_dma(pd);
-               return ret;
-       }
+       pd->dma_rx = pd->dma_tx = ERR_PTR(-EPROBE_DEFER);
 
        /* Enable Runtime PM for this device.
         *
@@ -937,8 +934,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
                return ret;
        }
 
-       dev_info(&dev->dev, "I2C adapter %d, bus speed %lu Hz, DMA=%c\n",
-                adap->nr, pd->bus_speed, (pd->dma_rx || pd->dma_tx) ? 'y' : 'n');
+       dev_info(&dev->dev, "I2C adapter %d, bus speed %lu Hz\n", adap->nr, pd->bus_speed);
 
        return 0;
 }
index 10641b7816f49e493dd2e1f091d0921d910c2475..dafb3c531f96f7ae61e70ff9f37d9324bee687ff 100644 (file)
@@ -22,7 +22,6 @@
 #include <linux/socket.h>
 #include <linux/in.h>
 #include <linux/in6.h>
-#include <linux/llist.h>
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
 #include <target/target_core_base.h>
 #define        ISERT_MAX_CONN          8
 #define ISER_MAX_RX_CQ_LEN     (ISERT_QP_MAX_RECV_DTOS * ISERT_MAX_CONN)
 #define ISER_MAX_TX_CQ_LEN     (ISERT_QP_MAX_REQ_DTOS  * ISERT_MAX_CONN)
+#define ISER_MAX_CQ_LEN                (ISER_MAX_RX_CQ_LEN + ISER_MAX_TX_CQ_LEN + \
+                                ISERT_MAX_CONN)
+
+int isert_debug_level = 0;
+module_param_named(debug_level, isert_debug_level, int, 0644);
+MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0 (default:0)");
 
 static DEFINE_MUTEX(device_list_mutex);
 static LIST_HEAD(device_list);
-static struct workqueue_struct *isert_rx_wq;
 static struct workqueue_struct *isert_comp_wq;
+static struct workqueue_struct *isert_release_wq;
 
 static void
 isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
@@ -54,19 +59,32 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
               struct isert_rdma_wr *wr);
 static int
 isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd);
+static int
+isert_rdma_post_recvl(struct isert_conn *isert_conn);
+static int
+isert_rdma_accept(struct isert_conn *isert_conn);
+struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
+
+/*
+ * True when the connection negotiated protection-information (PI)
+ * support and the command requests a protection operation other than
+ * TARGET_PROT_NORMAL.
+ */
+static inline bool
+isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
+{
+       return (conn->pi_support &&
+               cmd->prot_op != TARGET_PROT_NORMAL);
+}
+
 
 static void
 isert_qp_event_callback(struct ib_event *e, void *context)
 {
        struct isert_conn *isert_conn = (struct isert_conn *)context;
 
-       pr_err("isert_qp_event_callback event: %d\n", e->event);
+       isert_err("conn %p event: %d\n", isert_conn, e->event);
        switch (e->event) {
        case IB_EVENT_COMM_EST:
                rdma_notify(isert_conn->conn_cm_id, IB_EVENT_COMM_EST);
                break;
        case IB_EVENT_QP_LAST_WQE_REACHED:
-               pr_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED:\n");
+               isert_warn("Reached TX IB_EVENT_QP_LAST_WQE_REACHED\n");
                break;
        default:
                break;
@@ -80,39 +98,41 @@ isert_query_device(struct ib_device *ib_dev, struct ib_device_attr *devattr)
 
        ret = ib_query_device(ib_dev, devattr);
        if (ret) {
-               pr_err("ib_query_device() failed: %d\n", ret);
+               isert_err("ib_query_device() failed: %d\n", ret);
                return ret;
        }
-       pr_debug("devattr->max_sge: %d\n", devattr->max_sge);
-       pr_debug("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
+       isert_dbg("devattr->max_sge: %d\n", devattr->max_sge);
+       isert_dbg("devattr->max_sge_rd: %d\n", devattr->max_sge_rd);
 
        return 0;
 }
 
 static int
-isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
-                   u8 protection)
+isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id)
 {
        struct isert_device *device = isert_conn->conn_device;
        struct ib_qp_init_attr attr;
-       int ret, index, min_index = 0;
+       struct isert_comp *comp;
+       int ret, i, min = 0;
 
        mutex_lock(&device_list_mutex);
-       for (index = 0; index < device->cqs_used; index++)
-               if (device->cq_active_qps[index] <
-                   device->cq_active_qps[min_index])
-                       min_index = index;
-       device->cq_active_qps[min_index]++;
-       pr_debug("isert_conn_setup_qp: Using min_index: %d\n", min_index);
+       for (i = 0; i < device->comps_used; i++)
+               if (device->comps[i].active_qps <
+                   device->comps[min].active_qps)
+                       min = i;
+       comp = &device->comps[min];
+       comp->active_qps++;
+       isert_info("conn %p, using comp %p min_index: %d\n",
+                  isert_conn, comp, min);
        mutex_unlock(&device_list_mutex);
 
        memset(&attr, 0, sizeof(struct ib_qp_init_attr));
        attr.event_handler = isert_qp_event_callback;
        attr.qp_context = isert_conn;
-       attr.send_cq = device->dev_tx_cq[min_index];
-       attr.recv_cq = device->dev_rx_cq[min_index];
+       attr.send_cq = comp->cq;
+       attr.recv_cq = comp->cq;
        attr.cap.max_send_wr = ISERT_QP_MAX_REQ_DTOS;
-       attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
+       attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS + 1;
        /*
         * FIXME: Use devattr.max_sge - 2 for max_send_sge as
         * work-around for RDMA_READs with ConnectX-2.
@@ -126,29 +146,29 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
        attr.cap.max_recv_sge = 1;
        attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        attr.qp_type = IB_QPT_RC;
-       if (protection)
+       if (device->pi_capable)
                attr.create_flags |= IB_QP_CREATE_SIGNATURE_EN;
 
-       pr_debug("isert_conn_setup_qp cma_id->device: %p\n",
-                cma_id->device);
-       pr_debug("isert_conn_setup_qp conn_pd->device: %p\n",
-                isert_conn->conn_pd->device);
-
        ret = rdma_create_qp(cma_id, isert_conn->conn_pd, &attr);
        if (ret) {
-               pr_err("rdma_create_qp failed for cma_id %d\n", ret);
-               return ret;
+               isert_err("rdma_create_qp failed for cma_id %d\n", ret);
+               goto err;
        }
        isert_conn->conn_qp = cma_id->qp;
-       pr_debug("rdma_create_qp() returned success >>>>>>>>>>>>>>>>>>>>>>>>>.\n");
 
        return 0;
+err:
+       mutex_lock(&device_list_mutex);
+       comp->active_qps--;
+       mutex_unlock(&device_list_mutex);
+
+       return ret;
 }
 
 static void
 isert_cq_event_callback(struct ib_event *e, void *context)
 {
-       pr_debug("isert_cq_event_callback event: %d\n", e->event);
+       isert_dbg("event: %d\n", e->event);
 }
 
 static int
@@ -182,6 +202,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
        }
 
        isert_conn->conn_rx_desc_head = 0;
+
        return 0;
 
 dma_map_fail:
@@ -193,6 +214,8 @@ dma_map_fail:
        kfree(isert_conn->conn_rx_descs);
        isert_conn->conn_rx_descs = NULL;
 fail:
+       isert_err("conn %p failed to allocate rx descriptors\n", isert_conn);
+
        return -ENOMEM;
 }
 
@@ -216,27 +239,23 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
        isert_conn->conn_rx_descs = NULL;
 }
 
-static void isert_cq_tx_work(struct work_struct *);
-static void isert_cq_tx_callback(struct ib_cq *, void *);
-static void isert_cq_rx_work(struct work_struct *);
-static void isert_cq_rx_callback(struct ib_cq *, void *);
+static void isert_cq_work(struct work_struct *);
+static void isert_cq_callback(struct ib_cq *, void *);
 
 static int
 isert_create_device_ib_res(struct isert_device *device)
 {
        struct ib_device *ib_dev = device->ib_device;
-       struct isert_cq_desc *cq_desc;
        struct ib_device_attr *dev_attr;
-       int ret = 0, i, j;
-       int max_rx_cqe, max_tx_cqe;
+       int ret = 0, i;
+       int max_cqe;
 
        dev_attr = &device->dev_attr;
        ret = isert_query_device(ib_dev, dev_attr);
        if (ret)
                return ret;
 
-       max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
-       max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
+       max_cqe = min(ISER_MAX_CQ_LEN, dev_attr->max_cqe);
 
        /* asign function handlers */
        if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
@@ -254,55 +273,38 @@ isert_create_device_ib_res(struct isert_device *device)
        device->pi_capable = dev_attr->device_cap_flags &
                             IB_DEVICE_SIGNATURE_HANDOVER ? true : false;
 
-       device->cqs_used = min_t(int, num_online_cpus(),
-                                device->ib_device->num_comp_vectors);
-       device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
-       pr_debug("Using %d CQs, device %s supports %d vectors support "
-                "Fast registration %d pi_capable %d\n",
-                device->cqs_used, device->ib_device->name,
-                device->ib_device->num_comp_vectors, device->use_fastreg,
-                device->pi_capable);
-       device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
-                               device->cqs_used, GFP_KERNEL);
-       if (!device->cq_desc) {
-               pr_err("Unable to allocate device->cq_desc\n");
+       device->comps_used = min(ISERT_MAX_CQ, min_t(int, num_online_cpus(),
+                                       device->ib_device->num_comp_vectors));
+       isert_info("Using %d CQs, %s supports %d vectors support "
+                  "Fast registration %d pi_capable %d\n",
+                  device->comps_used, device->ib_device->name,
+                  device->ib_device->num_comp_vectors, device->use_fastreg,
+                  device->pi_capable);
+
+       device->comps = kcalloc(device->comps_used, sizeof(struct isert_comp),
+                               GFP_KERNEL);
+       if (!device->comps) {
+               isert_err("Unable to allocate completion contexts\n");
                return -ENOMEM;
        }
-       cq_desc = device->cq_desc;
-
-       for (i = 0; i < device->cqs_used; i++) {
-               cq_desc[i].device = device;
-               cq_desc[i].cq_index = i;
-
-               INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
-               device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
-                                               isert_cq_rx_callback,
-                                               isert_cq_event_callback,
-                                               (void *)&cq_desc[i],
-                                               max_rx_cqe, i);
-               if (IS_ERR(device->dev_rx_cq[i])) {
-                       ret = PTR_ERR(device->dev_rx_cq[i]);
-                       device->dev_rx_cq[i] = NULL;
-                       goto out_cq;
-               }
 
-               INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
-               device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
-                                               isert_cq_tx_callback,
-                                               isert_cq_event_callback,
-                                               (void *)&cq_desc[i],
-                                               max_tx_cqe, i);
-               if (IS_ERR(device->dev_tx_cq[i])) {
-                       ret = PTR_ERR(device->dev_tx_cq[i]);
-                       device->dev_tx_cq[i] = NULL;
-                       goto out_cq;
-               }
+       for (i = 0; i < device->comps_used; i++) {
+               struct isert_comp *comp = &device->comps[i];
 
-               ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
-               if (ret)
+               comp->device = device;
+               INIT_WORK(&comp->work, isert_cq_work);
+               comp->cq = ib_create_cq(device->ib_device,
+                                       isert_cq_callback,
+                                       isert_cq_event_callback,
+                                       (void *)comp,
+                                       max_cqe, i);
+               if (IS_ERR(comp->cq)) {
+                       ret = PTR_ERR(comp->cq);
+                       comp->cq = NULL;
                        goto out_cq;
+               }
 
-               ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+               ret = ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
                if (ret)
                        goto out_cq;
        }
@@ -310,19 +312,15 @@ isert_create_device_ib_res(struct isert_device *device)
        return 0;
 
 out_cq:
-       for (j = 0; j < i; j++) {
-               cq_desc = &device->cq_desc[j];
+       for (i = 0; i < device->comps_used; i++) {
+               struct isert_comp *comp = &device->comps[i];
 
-               if (device->dev_rx_cq[j]) {
-                       cancel_work_sync(&cq_desc->cq_rx_work);
-                       ib_destroy_cq(device->dev_rx_cq[j]);
-               }
-               if (device->dev_tx_cq[j]) {
-                       cancel_work_sync(&cq_desc->cq_tx_work);
-                       ib_destroy_cq(device->dev_tx_cq[j]);
+               if (comp->cq) {
+                       cancel_work_sync(&comp->work);
+                       ib_destroy_cq(comp->cq);
                }
        }
-       kfree(device->cq_desc);
+       kfree(device->comps);
 
        return ret;
 }
@@ -330,21 +328,18 @@ out_cq:
 static void
 isert_free_device_ib_res(struct isert_device *device)
 {
-       struct isert_cq_desc *cq_desc;
        int i;
 
-       for (i = 0; i < device->cqs_used; i++) {
-               cq_desc = &device->cq_desc[i];
+       isert_info("device %p\n", device);
 
-               cancel_work_sync(&cq_desc->cq_rx_work);
-               cancel_work_sync(&cq_desc->cq_tx_work);
-               ib_destroy_cq(device->dev_rx_cq[i]);
-               ib_destroy_cq(device->dev_tx_cq[i]);
-               device->dev_rx_cq[i] = NULL;
-               device->dev_tx_cq[i] = NULL;
-       }
+       for (i = 0; i < device->comps_used; i++) {
+               struct isert_comp *comp = &device->comps[i];
 
-       kfree(device->cq_desc);
+               cancel_work_sync(&comp->work);
+               ib_destroy_cq(comp->cq);
+               comp->cq = NULL;
+       }
+       kfree(device->comps);
 }
 
 static void
@@ -352,6 +347,7 @@ isert_device_try_release(struct isert_device *device)
 {
        mutex_lock(&device_list_mutex);
        device->refcount--;
+       isert_info("device %p refcount %d\n", device, device->refcount);
        if (!device->refcount) {
                isert_free_device_ib_res(device);
                list_del(&device->dev_node);
@@ -370,6 +366,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
        list_for_each_entry(device, &device_list, dev_node) {
                if (device->ib_device->node_guid == cma_id->device->node_guid) {
                        device->refcount++;
+                       isert_info("Found iser device %p refcount %d\n",
+                                  device, device->refcount);
                        mutex_unlock(&device_list_mutex);
                        return device;
                }
@@ -393,6 +391,8 @@ isert_device_find_by_ib_dev(struct rdma_cm_id *cma_id)
 
        device->refcount++;
        list_add_tail(&device->dev_node, &device_list);
+       isert_info("Created a new iser device %p refcount %d\n",
+                  device, device->refcount);
        mutex_unlock(&device_list_mutex);
 
        return device;
@@ -407,7 +407,7 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
        if (list_empty(&isert_conn->conn_fr_pool))
                return;
 
-       pr_debug("Freeing conn %p fastreg pool", isert_conn);
+       isert_info("Freeing conn %p fastreg pool", isert_conn);
 
        list_for_each_entry_safe(fr_desc, tmp,
                                 &isert_conn->conn_fr_pool, list) {
@@ -425,87 +425,97 @@ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
        }
 
        if (i < isert_conn->conn_fr_pool_size)
-               pr_warn("Pool still has %d regions registered\n",
+               isert_warn("Pool still has %d regions registered\n",
                        isert_conn->conn_fr_pool_size - i);
 }
 
+static int
+isert_create_pi_ctx(struct fast_reg_descriptor *desc,
+                   struct ib_device *device,
+                   struct ib_pd *pd)
+{
+       struct ib_mr_init_attr mr_init_attr;
+       struct pi_context *pi_ctx;
+       int ret;
+
+       pi_ctx = kzalloc(sizeof(*desc->pi_ctx), GFP_KERNEL);
+       if (!pi_ctx) {
+               isert_err("Failed to allocate pi context\n");
+               return -ENOMEM;
+       }
+
+       pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(device,
+                                           ISCSI_ISER_SG_TABLESIZE);
+       if (IS_ERR(pi_ctx->prot_frpl)) {
+               isert_err("Failed to allocate prot frpl err=%ld\n",
+                         PTR_ERR(pi_ctx->prot_frpl));
+               ret = PTR_ERR(pi_ctx->prot_frpl);
+               goto err_pi_ctx;
+       }
+
+       pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+       if (IS_ERR(pi_ctx->prot_mr)) {
+               isert_err("Failed to allocate prot frmr err=%ld\n",
+                         PTR_ERR(pi_ctx->prot_mr));
+               ret = PTR_ERR(pi_ctx->prot_mr);
+               goto err_prot_frpl;
+       }
+       desc->ind |= ISERT_PROT_KEY_VALID;
+
+       memset(&mr_init_attr, 0, sizeof(mr_init_attr));
+       mr_init_attr.max_reg_descriptors = 2;
+       mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
+       pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
+       if (IS_ERR(pi_ctx->sig_mr)) {
+               isert_err("Failed to allocate signature enabled mr err=%ld\n",
+                         PTR_ERR(pi_ctx->sig_mr));
+               ret = PTR_ERR(pi_ctx->sig_mr);
+               goto err_prot_mr;
+       }
+
+       desc->pi_ctx = pi_ctx;
+       desc->ind |= ISERT_SIG_KEY_VALID;
+       desc->ind &= ~ISERT_PROTECTED;
+
+       return 0;
+
+err_prot_mr:
+       ib_dereg_mr(desc->pi_ctx->prot_mr);
+err_prot_frpl:
+       ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
+err_pi_ctx:
+       kfree(desc->pi_ctx);
+
+       return ret;
+}
+
 static int
 isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
-                    struct fast_reg_descriptor *fr_desc, u8 protection)
+                    struct fast_reg_descriptor *fr_desc)
 {
        int ret;
 
        fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
                                                         ISCSI_ISER_SG_TABLESIZE);
        if (IS_ERR(fr_desc->data_frpl)) {
-               pr_err("Failed to allocate data frpl err=%ld\n",
-                      PTR_ERR(fr_desc->data_frpl));
+               isert_err("Failed to allocate data frpl err=%ld\n",
+                         PTR_ERR(fr_desc->data_frpl));
                return PTR_ERR(fr_desc->data_frpl);
        }
 
        fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
        if (IS_ERR(fr_desc->data_mr)) {
-               pr_err("Failed to allocate data frmr err=%ld\n",
-                      PTR_ERR(fr_desc->data_mr));
+               isert_err("Failed to allocate data frmr err=%ld\n",
+                         PTR_ERR(fr_desc->data_mr));
                ret = PTR_ERR(fr_desc->data_mr);
                goto err_data_frpl;
        }
-       pr_debug("Create fr_desc %p page_list %p\n",
-                fr_desc, fr_desc->data_frpl->page_list);
        fr_desc->ind |= ISERT_DATA_KEY_VALID;
 
-       if (protection) {
-               struct ib_mr_init_attr mr_init_attr = {0};
-               struct pi_context *pi_ctx;
-
-               fr_desc->pi_ctx = kzalloc(sizeof(*fr_desc->pi_ctx), GFP_KERNEL);
-               if (!fr_desc->pi_ctx) {
-                       pr_err("Failed to allocate pi context\n");
-                       ret = -ENOMEM;
-                       goto err_data_mr;
-               }
-               pi_ctx = fr_desc->pi_ctx;
-
-               pi_ctx->prot_frpl = ib_alloc_fast_reg_page_list(ib_device,
-                                                   ISCSI_ISER_SG_TABLESIZE);
-               if (IS_ERR(pi_ctx->prot_frpl)) {
-                       pr_err("Failed to allocate prot frpl err=%ld\n",
-                              PTR_ERR(pi_ctx->prot_frpl));
-                       ret = PTR_ERR(pi_ctx->prot_frpl);
-                       goto err_pi_ctx;
-               }
-
-               pi_ctx->prot_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
-               if (IS_ERR(pi_ctx->prot_mr)) {
-                       pr_err("Failed to allocate prot frmr err=%ld\n",
-                              PTR_ERR(pi_ctx->prot_mr));
-                       ret = PTR_ERR(pi_ctx->prot_mr);
-                       goto err_prot_frpl;
-               }
-               fr_desc->ind |= ISERT_PROT_KEY_VALID;
-
-               mr_init_attr.max_reg_descriptors = 2;
-               mr_init_attr.flags |= IB_MR_SIGNATURE_EN;
-               pi_ctx->sig_mr = ib_create_mr(pd, &mr_init_attr);
-               if (IS_ERR(pi_ctx->sig_mr)) {
-                       pr_err("Failed to allocate signature enabled mr err=%ld\n",
-                              PTR_ERR(pi_ctx->sig_mr));
-                       ret = PTR_ERR(pi_ctx->sig_mr);
-                       goto err_prot_mr;
-               }
-               fr_desc->ind |= ISERT_SIG_KEY_VALID;
-       }
-       fr_desc->ind &= ~ISERT_PROTECTED;
+       isert_dbg("Created fr_desc %p\n", fr_desc);
 
        return 0;
-err_prot_mr:
-       ib_dereg_mr(fr_desc->pi_ctx->prot_mr);
-err_prot_frpl:
-       ib_free_fast_reg_page_list(fr_desc->pi_ctx->prot_frpl);
-err_pi_ctx:
-       kfree(fr_desc->pi_ctx);
-err_data_mr:
-       ib_dereg_mr(fr_desc->data_mr);
+
 err_data_frpl:
        ib_free_fast_reg_page_list(fr_desc->data_frpl);
 
@@ -513,7 +523,7 @@ err_data_frpl:
 }
 
 static int
-isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
+isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
 {
        struct fast_reg_descriptor *fr_desc;
        struct isert_device *device = isert_conn->conn_device;
@@ -531,16 +541,15 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
        for (i = 0; i < tag_num; i++) {
                fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
                if (!fr_desc) {
-                       pr_err("Failed to allocate fast_reg descriptor\n");
+                       isert_err("Failed to allocate fast_reg descriptor\n");
                        ret = -ENOMEM;
                        goto err;
                }
 
                ret = isert_create_fr_desc(device->ib_device,
-                                          isert_conn->conn_pd, fr_desc,
-                                          pi_support);
+                                          isert_conn->conn_pd, fr_desc);
                if (ret) {
-                       pr_err("Failed to create fastreg descriptor err=%d\n",
+                       isert_err("Failed to create fastreg descriptor err=%d\n",
                               ret);
                        kfree(fr_desc);
                        goto err;
@@ -550,7 +559,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn, u8 pi_support)
                isert_conn->conn_fr_pool_size++;
        }
 
-       pr_debug("Creating conn %p fastreg pool size=%d",
+       isert_dbg("Creating conn %p fastreg pool size=%d",
                 isert_conn, isert_conn->conn_fr_pool_size);
 
        return 0;
@@ -563,47 +572,45 @@ err:
 static int
 isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
-       struct iscsi_np *np = cma_id->context;
-       struct isert_np *isert_np = np->np_context;
+       struct isert_np *isert_np = cma_id->context;
+       struct iscsi_np *np = isert_np->np;
        struct isert_conn *isert_conn;
        struct isert_device *device;
        struct ib_device *ib_dev = cma_id->device;
        int ret = 0;
-       u8 pi_support;
 
        spin_lock_bh(&np->np_thread_lock);
        if (!np->enabled) {
                spin_unlock_bh(&np->np_thread_lock);
-               pr_debug("iscsi_np is not enabled, reject connect request\n");
+               isert_dbg("iscsi_np is not enabled, reject connect request\n");
                return rdma_reject(cma_id, NULL, 0);
        }
        spin_unlock_bh(&np->np_thread_lock);
 
-       pr_debug("Entering isert_connect_request cma_id: %p, context: %p\n",
+       isert_dbg("cma_id: %p, portal: %p\n",
                 cma_id, cma_id->context);
 
        isert_conn = kzalloc(sizeof(struct isert_conn), GFP_KERNEL);
        if (!isert_conn) {
-               pr_err("Unable to allocate isert_conn\n");
+               isert_err("Unable to allocate isert_conn\n");
                return -ENOMEM;
        }
        isert_conn->state = ISER_CONN_INIT;
        INIT_LIST_HEAD(&isert_conn->conn_accept_node);
        init_completion(&isert_conn->conn_login_comp);
+       init_completion(&isert_conn->login_req_comp);
        init_completion(&isert_conn->conn_wait);
-       init_completion(&isert_conn->conn_wait_comp_err);
        kref_init(&isert_conn->conn_kref);
        mutex_init(&isert_conn->conn_mutex);
        spin_lock_init(&isert_conn->conn_lock);
        INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
 
-       cma_id->context = isert_conn;
        isert_conn->conn_cm_id = cma_id;
 
        isert_conn->login_buf = kzalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
                                        ISER_RX_LOGIN_SIZE, GFP_KERNEL);
        if (!isert_conn->login_buf) {
-               pr_err("Unable to allocate isert_conn->login_buf\n");
+               isert_err("Unable to allocate isert_conn->login_buf\n");
                ret = -ENOMEM;
                goto out;
        }
@@ -611,7 +618,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        isert_conn->login_req_buf = isert_conn->login_buf;
        isert_conn->login_rsp_buf = isert_conn->login_buf +
                                    ISCSI_DEF_MAX_RECV_SEG_LEN;
-       pr_debug("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
+       isert_dbg("Set login_buf: %p login_req_buf: %p login_rsp_buf: %p\n",
                 isert_conn->login_buf, isert_conn->login_req_buf,
                 isert_conn->login_rsp_buf);
 
@@ -621,7 +628,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 
        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
        if (ret) {
-               pr_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
+               isert_err("ib_dma_mapping_error failed for login_req_dma: %d\n",
                       ret);
                isert_conn->login_req_dma = 0;
                goto out_login_buf;
@@ -633,7 +640,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 
        ret = ib_dma_mapping_error(ib_dev, isert_conn->login_rsp_dma);
        if (ret) {
-               pr_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
+               isert_err("ib_dma_mapping_error failed for login_rsp_dma: %d\n",
                       ret);
                isert_conn->login_rsp_dma = 0;
                goto out_req_dma_map;
@@ -649,13 +656,13 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        isert_conn->initiator_depth = min_t(u8,
                                event->param.conn.initiator_depth,
                                device->dev_attr.max_qp_init_rd_atom);
-       pr_debug("Using initiator_depth: %u\n", isert_conn->initiator_depth);
+       isert_dbg("Using initiator_depth: %u\n", isert_conn->initiator_depth);
 
        isert_conn->conn_device = device;
        isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
        if (IS_ERR(isert_conn->conn_pd)) {
                ret = PTR_ERR(isert_conn->conn_pd);
-               pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
+               isert_err("ib_alloc_pd failed for conn %p: ret=%d\n",
                       isert_conn, ret);
                goto out_pd;
        }
@@ -664,20 +671,20 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
                                           IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(isert_conn->conn_mr)) {
                ret = PTR_ERR(isert_conn->conn_mr);
-               pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+               isert_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
                       isert_conn, ret);
                goto out_mr;
        }
 
-       pi_support = np->tpg_np->tpg->tpg_attrib.t10_pi;
-       if (pi_support && !device->pi_capable) {
-               pr_err("Protection information requested but not supported, "
-                      "rejecting connect request\n");
-               ret = rdma_reject(cma_id, NULL, 0);
-               goto out_mr;
-       }
+       ret = isert_conn_setup_qp(isert_conn, cma_id);
+       if (ret)
+               goto out_conn_dev;
 
-       ret = isert_conn_setup_qp(isert_conn, cma_id, pi_support);
+       ret = isert_rdma_post_recvl(isert_conn);
+       if (ret)
+               goto out_conn_dev;
+
+       ret = isert_rdma_accept(isert_conn);
        if (ret)
                goto out_conn_dev;
 
@@ -685,7 +692,7 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        list_add_tail(&isert_conn->conn_accept_node, &isert_np->np_accept_list);
        mutex_unlock(&isert_np->np_accept_mutex);
 
-       pr_debug("isert_connect_request() up np_sem np: %p\n", np);
+       isert_info("np %p: Allow accept_np to continue\n", np);
        up(&isert_np->np_sem);
        return 0;
 
@@ -705,6 +712,7 @@ out_login_buf:
        kfree(isert_conn->login_buf);
 out:
        kfree(isert_conn);
+       rdma_reject(cma_id, NULL, 0);
        return ret;
 }
 
@@ -713,24 +721,25 @@ isert_connect_release(struct isert_conn *isert_conn)
 {
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct isert_device *device = isert_conn->conn_device;
-       int cq_index;
 
-       pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
+       isert_dbg("conn %p\n", isert_conn);
 
        if (device && device->use_fastreg)
                isert_conn_free_fastreg_pool(isert_conn);
 
+       isert_free_rx_descriptors(isert_conn);
+       rdma_destroy_id(isert_conn->conn_cm_id);
+
        if (isert_conn->conn_qp) {
-               cq_index = ((struct isert_cq_desc *)
-                       isert_conn->conn_qp->recv_cq->cq_context)->cq_index;
-               pr_debug("isert_connect_release: cq_index: %d\n", cq_index);
-               isert_conn->conn_device->cq_active_qps[cq_index]--;
+               struct isert_comp *comp = isert_conn->conn_qp->recv_cq->cq_context;
 
-               rdma_destroy_qp(isert_conn->conn_cm_id);
-       }
+               isert_dbg("dec completion context %p active_qps\n", comp);
+               mutex_lock(&device_list_mutex);
+               comp->active_qps--;
+               mutex_unlock(&device_list_mutex);
 
-       isert_free_rx_descriptors(isert_conn);
-       rdma_destroy_id(isert_conn->conn_cm_id);
+               ib_destroy_qp(isert_conn->conn_qp);
+       }
 
        ib_dereg_mr(isert_conn->conn_mr);
        ib_dealloc_pd(isert_conn->conn_pd);
@@ -747,16 +756,24 @@ isert_connect_release(struct isert_conn *isert_conn)
 
        if (device)
                isert_device_try_release(device);
-
-       pr_debug("Leaving isert_connect_release >>>>>>>>>>>>\n");
 }
 
 static void
 isert_connected_handler(struct rdma_cm_id *cma_id)
 {
-       struct isert_conn *isert_conn = cma_id->context;
+       struct isert_conn *isert_conn = cma_id->qp->qp_context;
 
-       kref_get(&isert_conn->conn_kref);
+       isert_info("conn %p\n", isert_conn);
+
+       if (!kref_get_unless_zero(&isert_conn->conn_kref)) {
+               isert_warn("conn %p connect_release is running\n", isert_conn);
+               return;
+       }
+
+       mutex_lock(&isert_conn->conn_mutex);
+       if (isert_conn->state != ISER_CONN_FULL_FEATURE)
+               isert_conn->state = ISER_CONN_UP;
+       mutex_unlock(&isert_conn->conn_mutex);
 }
 
 static void
@@ -765,8 +782,8 @@ isert_release_conn_kref(struct kref *kref)
        struct isert_conn *isert_conn = container_of(kref,
                                struct isert_conn, conn_kref);
 
-       pr_debug("Calling isert_connect_release for final kref %s/%d\n",
-                current->comm, current->pid);
+       isert_info("conn %p final kref %s/%d\n", isert_conn, current->comm,
+                  current->pid);
 
        isert_connect_release(isert_conn);
 }
@@ -777,75 +794,111 @@ isert_put_conn(struct isert_conn *isert_conn)
        kref_put(&isert_conn->conn_kref, isert_release_conn_kref);
 }
 
+/**
+ * isert_conn_terminate() - Initiate connection termination
+ * @isert_conn: isert connection struct
+ *
+ * Notes:
+ * In case the connection state is FULL_FEATURE, move state
+ * to TEMINATING and start teardown sequence (rdma_disconnect).
+ * In case the connection state is UP, complete flush as well.
+ *
+ * This routine must be called with conn_mutex held. Thus it is
+ * safe to call multiple times.
+ */
 static void
-isert_disconnect_work(struct work_struct *work)
+isert_conn_terminate(struct isert_conn *isert_conn)
 {
-       struct isert_conn *isert_conn = container_of(work,
-                               struct isert_conn, conn_logout_work);
+       int err;
 
-       pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
-       mutex_lock(&isert_conn->conn_mutex);
-       if (isert_conn->state == ISER_CONN_UP)
+       switch (isert_conn->state) {
+       case ISER_CONN_TERMINATING:
+               break;
+       case ISER_CONN_UP:
+       case ISER_CONN_FULL_FEATURE: /* FALLTHRU */
+               isert_info("Terminating conn %p state %d\n",
+                          isert_conn, isert_conn->state);
                isert_conn->state = ISER_CONN_TERMINATING;
-
-       if (isert_conn->post_recv_buf_count == 0 &&
-           atomic_read(&isert_conn->post_send_buf_count) == 0) {
-               mutex_unlock(&isert_conn->conn_mutex);
-               goto wake_up;
-       }
-       if (!isert_conn->conn_cm_id) {
-               mutex_unlock(&isert_conn->conn_mutex);
-               isert_put_conn(isert_conn);
-               return;
+               err = rdma_disconnect(isert_conn->conn_cm_id);
+               if (err)
+                       isert_warn("Failed rdma_disconnect isert_conn %p\n",
+                                  isert_conn);
+               break;
+       default:
+               isert_warn("conn %p teminating in state %d\n",
+                          isert_conn, isert_conn->state);
        }
+}
 
-       if (isert_conn->disconnect) {
-               /* Send DREQ/DREP towards our initiator */
-               rdma_disconnect(isert_conn->conn_cm_id);
-       }
+static int
+isert_np_cma_handler(struct isert_np *isert_np,
+                    enum rdma_cm_event_type event)
+{
+       isert_dbg("isert np %p, handling event %d\n", isert_np, event);
 
-       mutex_unlock(&isert_conn->conn_mutex);
+       switch (event) {
+       case RDMA_CM_EVENT_DEVICE_REMOVAL:
+               isert_np->np_cm_id = NULL;
+               break;
+       case RDMA_CM_EVENT_ADDR_CHANGE:
+               isert_np->np_cm_id = isert_setup_id(isert_np);
+               if (IS_ERR(isert_np->np_cm_id)) {
+                       isert_err("isert np %p setup id failed: %ld\n",
+                                 isert_np, PTR_ERR(isert_np->np_cm_id));
+                       isert_np->np_cm_id = NULL;
+               }
+               break;
+       default:
+               isert_err("isert np %p Unexpected event %d\n",
+                         isert_np, event);
+       }
 
-wake_up:
-       complete(&isert_conn->conn_wait);
+       return -1;
 }
 
 static int
-isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
+isert_disconnected_handler(struct rdma_cm_id *cma_id,
+                          enum rdma_cm_event_type event)
 {
+       struct isert_np *isert_np = cma_id->context;
        struct isert_conn *isert_conn;
 
-       if (!cma_id->qp) {
-               struct isert_np *isert_np = cma_id->context;
+       if (isert_np->np_cm_id == cma_id)
+               return isert_np_cma_handler(cma_id->context, event);
 
-               isert_np->np_cm_id = NULL;
-               return -1;
-       }
+       isert_conn = cma_id->qp->qp_context;
 
-       isert_conn = (struct isert_conn *)cma_id->context;
+       mutex_lock(&isert_conn->conn_mutex);
+       isert_conn_terminate(isert_conn);
+       mutex_unlock(&isert_conn->conn_mutex);
 
-       isert_conn->disconnect = disconnect;
-       INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
-       schedule_work(&isert_conn->conn_logout_work);
+       isert_info("conn %p completing conn_wait\n", isert_conn);
+       complete(&isert_conn->conn_wait);
 
        return 0;
 }
 
+static void
+isert_connect_error(struct rdma_cm_id *cma_id)
+{
+       struct isert_conn *isert_conn = cma_id->qp->qp_context;
+
+       isert_put_conn(isert_conn);
+}
+
 static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
        int ret = 0;
-       bool disconnect = false;
 
-       pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
-                event->event, event->status, cma_id->context, cma_id);
+       isert_info("event %d status %d id %p np %p\n", event->event,
+                  event->status, cma_id, cma_id->context);
 
        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
                ret = isert_connect_request(cma_id, event);
                if (ret)
-                       pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
-                               event->event, ret);
+                       isert_err("failed handle connect request %d\n", ret);
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
                isert_connected_handler(cma_id);
@@ -853,13 +906,16 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
        case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
        case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
        case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
-               disconnect = true;
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
-               ret = isert_disconnected_handler(cma_id, disconnect);
+               ret = isert_disconnected_handler(cma_id, event->event);
                break;
+       case RDMA_CM_EVENT_REJECTED:       /* FALLTHRU */
+       case RDMA_CM_EVENT_UNREACHABLE:    /* FALLTHRU */
        case RDMA_CM_EVENT_CONNECT_ERROR:
+               isert_connect_error(cma_id);
+               break;
        default:
-               pr_err("Unhandled RDMA CMA event: %d\n", event->event);
+               isert_err("Unhandled RDMA CMA event: %d\n", event->event);
                break;
        }
 
@@ -876,7 +932,7 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
 
        for (rx_wr = isert_conn->conn_rx_wr, i = 0; i < count; i++, rx_wr++) {
                rx_desc         = &isert_conn->conn_rx_descs[rx_head];
-               rx_wr->wr_id    = (unsigned long)rx_desc;
+               rx_wr->wr_id    = (uintptr_t)rx_desc;
                rx_wr->sg_list  = &rx_desc->rx_sg;
                rx_wr->num_sge  = 1;
                rx_wr->next     = rx_wr + 1;
@@ -890,10 +946,10 @@ isert_post_recv(struct isert_conn *isert_conn, u32 count)
        ret = ib_post_recv(isert_conn->conn_qp, isert_conn->conn_rx_wr,
                                &rx_wr_failed);
        if (ret) {
-               pr_err("ib_post_recv() failed with ret: %d\n", ret);
+               isert_err("ib_post_recv() failed with ret: %d\n", ret);
                isert_conn->post_recv_buf_count -= count;
        } else {
-               pr_debug("isert_post_recv(): Posted %d RX buffers\n", count);
+               isert_dbg("isert_post_recv(): Posted %d RX buffers\n", count);
                isert_conn->conn_rx_desc_head = rx_head;
        }
        return ret;
@@ -910,19 +966,15 @@ isert_post_send(struct isert_conn *isert_conn, struct iser_tx_desc *tx_desc)
                                      ISER_HEADERS_LEN, DMA_TO_DEVICE);
 
        send_wr.next    = NULL;
-       send_wr.wr_id   = (unsigned long)tx_desc;
+       send_wr.wr_id   = (uintptr_t)tx_desc;
        send_wr.sg_list = tx_desc->tx_sg;
        send_wr.num_sge = tx_desc->num_sge;
        send_wr.opcode  = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;
 
-       atomic_inc(&isert_conn->post_send_buf_count);
-
        ret = ib_post_send(isert_conn->conn_qp, &send_wr, &send_wr_failed);
-       if (ret) {
-               pr_err("ib_post_send() failed, ret: %d\n", ret);
-               atomic_dec(&isert_conn->post_send_buf_count);
-       }
+       if (ret)
+               isert_err("ib_post_send() failed, ret: %d\n", ret);
 
        return ret;
 }
@@ -945,7 +997,7 @@ isert_create_send_desc(struct isert_conn *isert_conn,
 
        if (tx_desc->tx_sg[0].lkey != isert_conn->conn_mr->lkey) {
                tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
-               pr_debug("tx_desc %p lkey mismatch, fixing\n", tx_desc);
+               isert_dbg("tx_desc %p lkey mismatch, fixing\n", tx_desc);
        }
 }
 
@@ -959,7 +1011,7 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
        dma_addr = ib_dma_map_single(ib_dev, (void *)tx_desc,
                        ISER_HEADERS_LEN, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(ib_dev, dma_addr)) {
-               pr_err("ib_dma_mapping_error() failed\n");
+               isert_err("ib_dma_mapping_error() failed\n");
                return -ENOMEM;
        }
 
@@ -968,40 +1020,24 @@ isert_init_tx_hdrs(struct isert_conn *isert_conn,
        tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
        tx_desc->tx_sg[0].lkey = isert_conn->conn_mr->lkey;
 
-       pr_debug("isert_init_tx_hdrs: Setup tx_sg[0].addr: 0x%llx length: %u"
-                " lkey: 0x%08x\n", tx_desc->tx_sg[0].addr,
-                tx_desc->tx_sg[0].length, tx_desc->tx_sg[0].lkey);
+       isert_dbg("Setup tx_sg[0].addr: 0x%llx length: %u lkey: 0x%x\n",
+                 tx_desc->tx_sg[0].addr, tx_desc->tx_sg[0].length,
+                 tx_desc->tx_sg[0].lkey);
 
        return 0;
 }
 
 static void
 isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
-                  struct ib_send_wr *send_wr, bool coalesce)
+                  struct ib_send_wr *send_wr)
 {
        struct iser_tx_desc *tx_desc = &isert_cmd->tx_desc;
 
        isert_cmd->rdma_wr.iser_ib_op = ISER_IB_SEND;
-       send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+       send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
        send_wr->opcode = IB_WR_SEND;
        send_wr->sg_list = &tx_desc->tx_sg[0];
        send_wr->num_sge = isert_cmd->tx_desc.num_sge;
-       /*
-        * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
-        * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
-        */
-       mutex_lock(&isert_conn->conn_mutex);
-       if (coalesce && isert_conn->state == ISER_CONN_UP &&
-           ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
-               tx_desc->llnode_active = true;
-               llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
-               mutex_unlock(&isert_conn->conn_mutex);
-               return;
-       }
-       isert_conn->conn_comp_batch = 0;
-       tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
-       mutex_unlock(&isert_conn->conn_mutex);
-
        send_wr->send_flags = IB_SEND_SIGNALED;
 }
 
@@ -1017,22 +1053,21 @@ isert_rdma_post_recvl(struct isert_conn *isert_conn)
        sge.length = ISER_RX_LOGIN_SIZE;
        sge.lkey = isert_conn->conn_mr->lkey;
 
-       pr_debug("Setup sge: addr: %llx length: %d 0x%08x\n",
+       isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
                sge.addr, sge.length, sge.lkey);
 
        memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
-       rx_wr.wr_id = (unsigned long)isert_conn->login_req_buf;
+       rx_wr.wr_id = (uintptr_t)isert_conn->login_req_buf;
        rx_wr.sg_list = &sge;
        rx_wr.num_sge = 1;
 
        isert_conn->post_recv_buf_count++;
        ret = ib_post_recv(isert_conn->conn_qp, &rx_wr, &rx_wr_fail);
        if (ret) {
-               pr_err("ib_post_recv() failed: %d\n", ret);
+               isert_err("ib_post_recv() failed: %d\n", ret);
                isert_conn->post_recv_buf_count--;
        }
 
-       pr_debug("ib_post_recv(): returned success >>>>>>>>>>>>>>>>>>>>>>>>\n");
        return ret;
 }
 
@@ -1072,13 +1107,9 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
                if (login->login_complete) {
                        if (!conn->sess->sess_ops->SessionType &&
                            isert_conn->conn_device->use_fastreg) {
-                               /* Normal Session and fastreg is used */
-                               u8 pi_support = login->np->tpg_np->tpg->tpg_attrib.t10_pi;
-
-                               ret = isert_conn_create_fastreg_pool(isert_conn,
-                                                                    pi_support);
+                               ret = isert_conn_create_fastreg_pool(isert_conn);
                                if (ret) {
-                                       pr_err("Conn: %p failed to create"
+                                       isert_err("Conn: %p failed to create"
                                               " fastreg pool\n", isert_conn);
                                        return ret;
                                }
@@ -1092,7 +1123,10 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
                        if (ret)
                                return ret;
 
-                       isert_conn->state = ISER_CONN_UP;
+                       /* Now we are in FULL_FEATURE phase */
+                       mutex_lock(&isert_conn->conn_mutex);
+                       isert_conn->state = ISER_CONN_FULL_FEATURE;
+                       mutex_unlock(&isert_conn->conn_mutex);
                        goto post_send;
                }
 
@@ -1109,18 +1143,17 @@ post_send:
 }
 
 static void
-isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
-                  struct isert_conn *isert_conn)
+isert_rx_login_req(struct isert_conn *isert_conn)
 {
+       struct iser_rx_desc *rx_desc = (void *)isert_conn->login_req_buf;
+       int rx_buflen = isert_conn->login_req_len;
        struct iscsi_conn *conn = isert_conn->conn;
        struct iscsi_login *login = conn->conn_login;
        int size;
 
-       if (!login) {
-               pr_err("conn->conn_login is NULL\n");
-               dump_stack();
-               return;
-       }
+       isert_info("conn %p\n", isert_conn);
+
+       WARN_ON_ONCE(!login);
 
        if (login->first_request) {
                struct iscsi_login_req *login_req =
@@ -1146,8 +1179,9 @@ isert_rx_login_req(struct iser_rx_desc *rx_desc, int rx_buflen,
        memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
 
        size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
-       pr_debug("Using login payload size: %d, rx_buflen: %d MAX_KEY_VALUE_PAIRS: %d\n",
-                size, rx_buflen, MAX_KEY_VALUE_PAIRS);
+       isert_dbg("Using login payload size: %d, rx_buflen: %d "
+                 "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
+                 MAX_KEY_VALUE_PAIRS);
        memcpy(login->req_buf, &rx_desc->data[0], size);
 
        if (login->first_request) {
@@ -1166,7 +1200,7 @@ static struct iscsi_cmd
 
        cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
        if (!cmd) {
-               pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
+               isert_err("Unable to allocate iscsi_cmd + isert_cmd\n");
                return NULL;
        }
        isert_cmd = iscsit_priv_cmd(cmd);
@@ -1209,8 +1243,8 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
        sg = &cmd->se_cmd.t_data_sg[0];
        sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
 
-       pr_debug("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
-                sg, sg_nents, &rx_desc->data[0], imm_data_len);
+       isert_dbg("Copying Immediate SG: %p sg_nents: %u from %p imm_data_len: %d\n",
+                 sg, sg_nents, &rx_desc->data[0], imm_data_len);
 
        sg_copy_from_buffer(sg, sg_nents, &rx_desc->data[0], imm_data_len);
 
@@ -1254,13 +1288,15 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
         * FIXME: Unexpected unsolicited_data out
         */
        if (!cmd->unsolicited_data) {
-               pr_err("Received unexpected solicited data payload\n");
+               isert_err("Received unexpected solicited data payload\n");
                dump_stack();
                return -1;
        }
 
-       pr_debug("Unsolicited DataOut unsol_data_len: %u, write_data_done: %u, data_length: %u\n",
-                unsol_data_len, cmd->write_data_done, cmd->se_cmd.data_length);
+       isert_dbg("Unsolicited DataOut unsol_data_len: %u, "
+                 "write_data_done: %u, data_length: %u\n",
+                 unsol_data_len,  cmd->write_data_done,
+                 cmd->se_cmd.data_length);
 
        sg_off = cmd->write_data_done / PAGE_SIZE;
        sg_start = &cmd->se_cmd.t_data_sg[sg_off];
@@ -1270,12 +1306,13 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
         * FIXME: Non page-aligned unsolicited_data out
         */
        if (page_off) {
-               pr_err("Received unexpected non-page aligned data payload\n");
+               isert_err("unexpected non-page aligned data payload\n");
                dump_stack();
                return -1;
        }
-       pr_debug("Copying DataOut: sg_start: %p, sg_off: %u sg_nents: %u from %p %u\n",
-                sg_start, sg_off, sg_nents, &rx_desc->data[0], unsol_data_len);
+       isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
+                 "sg_nents: %u from %p %u\n", sg_start, sg_off,
+                 sg_nents, &rx_desc->data[0], unsol_data_len);
 
        sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
                            unsol_data_len);
@@ -1322,8 +1359,8 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
 
        text_in = kzalloc(payload_length, GFP_KERNEL);
        if (!text_in) {
-               pr_err("Unable to allocate text_in of payload_length: %u\n",
-                      payload_length);
+               isert_err("Unable to allocate text_in of payload_length: %u\n",
+                         payload_length);
                return -ENOMEM;
        }
        cmd->text_in_ptr = text_in;
@@ -1348,8 +1385,8 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 
        if (sess->sess_ops->SessionType &&
           (!(opcode & ISCSI_OP_TEXT) || !(opcode & ISCSI_OP_LOGOUT))) {
-               pr_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
-                      " ignoring\n", opcode);
+               isert_err("Got illegal opcode: 0x%02x in SessionType=Discovery,"
+                         " ignoring\n", opcode);
                return 0;
        }
 
@@ -1395,10 +1432,6 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
                        break;
 
                ret = iscsit_handle_logout_cmd(conn, cmd, (unsigned char *)hdr);
-               if (ret > 0)
-                       wait_for_completion_timeout(&conn->conn_logout_comp,
-                                                   SECONDS_FOR_LOGOUT_COMP *
-                                                   HZ);
                break;
        case ISCSI_OP_TEXT:
                cmd = isert_allocate_cmd(conn);
@@ -1410,7 +1443,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
                                            rx_desc, (struct iscsi_text *)hdr);
                break;
        default:
-               pr_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
+               isert_err("Got unknown iSCSI OpCode: 0x%02x\n", opcode);
                dump_stack();
                break;
        }
@@ -1431,23 +1464,23 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
                if (iser_hdr->flags & ISER_RSV) {
                        read_stag = be32_to_cpu(iser_hdr->read_stag);
                        read_va = be64_to_cpu(iser_hdr->read_va);
-                       pr_debug("ISER_RSV: read_stag: 0x%08x read_va: 0x%16llx\n",
-                                read_stag, (unsigned long long)read_va);
+                       isert_dbg("ISER_RSV: read_stag: 0x%x read_va: 0x%llx\n",
+                                 read_stag, (unsigned long long)read_va);
                }
                if (iser_hdr->flags & ISER_WSV) {
                        write_stag = be32_to_cpu(iser_hdr->write_stag);
                        write_va = be64_to_cpu(iser_hdr->write_va);
-                       pr_debug("ISER_WSV: write__stag: 0x%08x write_va: 0x%16llx\n",
-                                write_stag, (unsigned long long)write_va);
+                       isert_dbg("ISER_WSV: write_stag: 0x%x write_va: 0x%llx\n",
+                                 write_stag, (unsigned long long)write_va);
                }
 
-               pr_debug("ISER ISCSI_CTRL PDU\n");
+               isert_dbg("ISER ISCSI_CTRL PDU\n");
                break;
        case ISER_HELLO:
-               pr_err("iSER Hello message\n");
+               isert_err("iSER Hello message\n");
                break;
        default:
-               pr_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
+               isert_warn("Unknown iSER hdr flags: 0x%02x\n", iser_hdr->flags);
                break;
        }
 
@@ -1457,7 +1490,7 @@ isert_rx_do_work(struct iser_rx_desc *rx_desc, struct isert_conn *isert_conn)
 
 static void
 isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
-                   unsigned long xfer_len)
+                   u32 xfer_len)
 {
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iscsi_hdr *hdr;
@@ -1467,34 +1500,43 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
        if ((char *)desc == isert_conn->login_req_buf) {
                rx_dma = isert_conn->login_req_dma;
                rx_buflen = ISER_RX_LOGIN_SIZE;
-               pr_debug("ISER login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+               isert_dbg("login_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
                         rx_dma, rx_buflen);
        } else {
                rx_dma = desc->dma_addr;
                rx_buflen = ISER_RX_PAYLOAD_SIZE;
-               pr_debug("ISER req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
+               isert_dbg("req_buf: Using rx_dma: 0x%llx, rx_buflen: %d\n",
                         rx_dma, rx_buflen);
        }
 
        ib_dma_sync_single_for_cpu(ib_dev, rx_dma, rx_buflen, DMA_FROM_DEVICE);
 
        hdr = &desc->iscsi_header;
-       pr_debug("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
+       isert_dbg("iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
                 hdr->opcode, hdr->itt, hdr->flags,
                 (int)(xfer_len - ISER_HEADERS_LEN));
 
-       if ((char *)desc == isert_conn->login_req_buf)
-               isert_rx_login_req(desc, xfer_len - ISER_HEADERS_LEN,
-                                  isert_conn);
-       else
+       if ((char *)desc == isert_conn->login_req_buf) {
+               isert_conn->login_req_len = xfer_len - ISER_HEADERS_LEN;
+               if (isert_conn->conn) {
+                       struct iscsi_login *login = isert_conn->conn->conn_login;
+
+                       if (login && !login->first_request)
+                               isert_rx_login_req(isert_conn);
+               }
+               mutex_lock(&isert_conn->conn_mutex);
+               complete(&isert_conn->login_req_comp);
+               mutex_unlock(&isert_conn->conn_mutex);
+       } else {
                isert_rx_do_work(desc, isert_conn);
+       }
 
        ib_dma_sync_single_for_device(ib_dev, rx_dma, rx_buflen,
                                      DMA_FROM_DEVICE);
 
        isert_conn->post_recv_buf_count--;
-       pr_debug("iSERT: Decremented post_recv_buf_count: %d\n",
-                isert_conn->post_recv_buf_count);
+       isert_dbg("Decremented post_recv_buf_count: %d\n",
+                 isert_conn->post_recv_buf_count);
 
        if ((char *)desc == isert_conn->login_req_buf)
                return;
@@ -1505,7 +1547,7 @@ isert_rx_completion(struct iser_rx_desc *desc, struct isert_conn *isert_conn,
                                ISERT_MIN_POSTED_RX);
                err = isert_post_recv(isert_conn, count);
                if (err) {
-                       pr_err("isert_post_recv() count: %d failed, %d\n",
+                       isert_err("isert_post_recv() count: %d failed, %d\n",
                               count, err);
                }
        }
@@ -1534,12 +1576,12 @@ isert_map_data_buf(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
        data->dma_nents = ib_dma_map_sg(ib_dev, data->sg, data->nents,
                                        data->dma_dir);
        if (unlikely(!data->dma_nents)) {
-               pr_err("Cmd: unable to dma map SGs %p\n", sg);
+               isert_err("Cmd: unable to dma map SGs %p\n", sg);
                return -EINVAL;
        }
 
-       pr_debug("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
-                isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
+       isert_dbg("Mapped cmd: %p count: %u sg: %p sg_nents: %u rdma_len %d\n",
+                 isert_cmd, data->dma_nents, data->sg, data->nents, data->len);
 
        return 0;
 }
@@ -1560,21 +1602,21 @@ isert_unmap_cmd(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
 {
        struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 
-       pr_debug("isert_unmap_cmd: %p\n", isert_cmd);
+       isert_dbg("Cmd %p\n", isert_cmd);
 
        if (wr->data.sg) {
-               pr_debug("isert_unmap_cmd: %p unmap_sg op\n", isert_cmd);
+               isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
                isert_unmap_data_buf(isert_conn, &wr->data);
        }
 
        if (wr->send_wr) {
-               pr_debug("isert_unmap_cmd: %p free send_wr\n", isert_cmd);
+               isert_dbg("Cmd %p free send_wr\n", isert_cmd);
                kfree(wr->send_wr);
                wr->send_wr = NULL;
        }
 
        if (wr->ib_sge) {
-               pr_debug("isert_unmap_cmd: %p free ib_sge\n", isert_cmd);
+               isert_dbg("Cmd %p free ib_sge\n", isert_cmd);
                kfree(wr->ib_sge);
                wr->ib_sge = NULL;
        }
@@ -1586,11 +1628,10 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
        struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
        LIST_HEAD(unmap_list);
 
-       pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
+       isert_dbg("Cmd %p\n", isert_cmd);
 
        if (wr->fr_desc) {
-               pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
-                        isert_cmd, wr->fr_desc);
+               isert_dbg("Cmd %p free fr_desc %p\n", isert_cmd, wr->fr_desc);
                if (wr->fr_desc->ind & ISERT_PROTECTED) {
                        isert_unmap_data_buf(isert_conn, &wr->prot);
                        wr->fr_desc->ind &= ~ISERT_PROTECTED;
@@ -1602,7 +1643,7 @@ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
        }
 
        if (wr->data.sg) {
-               pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
+               isert_dbg("Cmd %p unmap_sg op\n", isert_cmd);
                isert_unmap_data_buf(isert_conn, &wr->data);
        }
 
@@ -1618,7 +1659,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
        struct iscsi_conn *conn = isert_conn->conn;
        struct isert_device *device = isert_conn->conn_device;
 
-       pr_debug("Entering isert_put_cmd: %p\n", isert_cmd);
+       isert_dbg("Cmd %p\n", isert_cmd);
 
        switch (cmd->iscsi_opcode) {
        case ISCSI_OP_SCSI_CMD:
@@ -1668,7 +1709,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd, bool comp_err)
                 * associated cmd->se_cmd needs to be released.
                 */
                if (cmd->se_cmd.se_tfo != NULL) {
-                       pr_debug("Calling transport_generic_free_cmd from"
+                       isert_dbg("Calling transport_generic_free_cmd from"
                                 " isert_put_cmd for 0x%02x\n",
                                 cmd->iscsi_opcode);
                        transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1687,7 +1728,7 @@ static void
 isert_unmap_tx_desc(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev)
 {
        if (tx_desc->dma_addr != 0) {
-               pr_debug("Calling ib_dma_unmap_single for tx_desc->dma_addr\n");
+               isert_dbg("unmap single for tx_desc->dma_addr\n");
                ib_dma_unmap_single(ib_dev, tx_desc->dma_addr,
                                    ISER_HEADERS_LEN, DMA_TO_DEVICE);
                tx_desc->dma_addr = 0;
@@ -1699,7 +1740,7 @@ isert_completion_put(struct iser_tx_desc *tx_desc, struct isert_cmd *isert_cmd,
                     struct ib_device *ib_dev, bool comp_err)
 {
        if (isert_cmd->pdu_buf_dma != 0) {
-               pr_debug("Calling ib_dma_unmap_single for isert_cmd->pdu_buf_dma\n");
+               isert_dbg("unmap single for isert_cmd->pdu_buf_dma\n");
                ib_dma_unmap_single(ib_dev, isert_cmd->pdu_buf_dma,
                                    isert_cmd->pdu_buf_len, DMA_TO_DEVICE);
                isert_cmd->pdu_buf_dma = 0;
@@ -1717,7 +1758,7 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
 
        ret = ib_check_mr_status(sig_mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
        if (ret) {
-               pr_err("ib_check_mr_status failed, ret %d\n", ret);
+               isert_err("ib_check_mr_status failed, ret %d\n", ret);
                goto fail_mr_status;
        }
 
@@ -1740,12 +1781,12 @@ isert_check_pi_status(struct se_cmd *se_cmd, struct ib_mr *sig_mr)
                do_div(sec_offset_err, block_size);
                se_cmd->bad_sector = sec_offset_err + se_cmd->t_task_lba;
 
-               pr_err("isert: PI error found type %d at sector 0x%llx "
-                      "expected 0x%x vs actual 0x%x\n",
-                      mr_status.sig_err.err_type,
-                      (unsigned long long)se_cmd->bad_sector,
-                      mr_status.sig_err.expected,
-                      mr_status.sig_err.actual);
+               isert_err("PI error found type %d at sector 0x%llx "
+                         "expected 0x%x vs actual 0x%x\n",
+                         mr_status.sig_err.err_type,
+                         (unsigned long long)se_cmd->bad_sector,
+                         mr_status.sig_err.expected,
+                         mr_status.sig_err.actual);
                ret = 1;
        }
 
@@ -1801,7 +1842,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
        cmd->write_data_done = wr->data.len;
        wr->send_wr_num = 0;
 
-       pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
+       isert_dbg("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
        spin_lock_bh(&cmd->istate_lock);
        cmd->cmd_flags |= ICF_GOT_LAST_DATAOUT;
        cmd->i_state = ISTATE_RECEIVED_LAST_DATAOUT;
@@ -1823,36 +1864,22 @@ isert_do_control_comp(struct work_struct *work)
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
 
+       isert_dbg("Cmd %p i_state %d\n", isert_cmd, cmd->i_state);
+
        switch (cmd->i_state) {
        case ISTATE_SEND_TASKMGTRSP:
-               pr_debug("Calling iscsit_tmr_post_handler >>>>>>>>>>>>>>>>>\n");
-
-               atomic_dec(&isert_conn->post_send_buf_count);
                iscsit_tmr_post_handler(cmd, cmd->conn);
-
-               cmd->i_state = ISTATE_SENT_STATUS;
-               isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
-               break;
-       case ISTATE_SEND_REJECT:
-               pr_debug("Got isert_do_control_comp ISTATE_SEND_REJECT: >>>\n");
-               atomic_dec(&isert_conn->post_send_buf_count);
-
+       case ISTATE_SEND_REJECT:   /* FALLTHRU */
+       case ISTATE_SEND_TEXTRSP:  /* FALLTHRU */
                cmd->i_state = ISTATE_SENT_STATUS;
-               isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
+               isert_completion_put(&isert_cmd->tx_desc, isert_cmd,
+                                    ib_dev, false);
                break;
        case ISTATE_SEND_LOGOUTRSP:
-               pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
-
-               atomic_dec(&isert_conn->post_send_buf_count);
                iscsit_logout_post_handler(cmd, cmd->conn);
                break;
-       case ISTATE_SEND_TEXTRSP:
-               atomic_dec(&isert_conn->post_send_buf_count);
-               cmd->i_state = ISTATE_SENT_STATUS;
-               isert_completion_put(&isert_cmd->tx_desc, isert_cmd, ib_dev, false);
-               break;
        default:
-               pr_err("Unknown do_control_comp i_state %d\n", cmd->i_state);
+               isert_err("Unknown i_state %d\n", cmd->i_state);
                dump_stack();
                break;
        }
@@ -1865,7 +1892,6 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
                          struct ib_device *ib_dev)
 {
        struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
-       struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
 
        if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
            cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1878,267 +1904,151 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
                return;
        }
 
-       /**
-        * If send_wr_num is 0 this means that we got
-        * RDMA completion and we cleared it and we should
-        * simply decrement the response post. else the
-        * response is incorporated in send_wr_num, just
-        * sub it.
-        **/
-       if (wr->send_wr_num)
-               atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
-       else
-               atomic_dec(&isert_conn->post_send_buf_count);
-
        cmd->i_state = ISTATE_SENT_STATUS;
        isert_completion_put(tx_desc, isert_cmd, ib_dev, false);
 }
 
 static void
-__isert_send_completion(struct iser_tx_desc *tx_desc,
-                       struct isert_conn *isert_conn)
+isert_send_completion(struct iser_tx_desc *tx_desc,
+                     struct isert_conn *isert_conn)
 {
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
        struct isert_rdma_wr *wr;
 
        if (!isert_cmd) {
-               atomic_dec(&isert_conn->post_send_buf_count);
                isert_unmap_tx_desc(tx_desc, ib_dev);
                return;
        }
        wr = &isert_cmd->rdma_wr;
 
+       isert_dbg("Cmd %p iser_ib_op %d\n", isert_cmd, wr->iser_ib_op);
+
        switch (wr->iser_ib_op) {
        case ISER_IB_RECV:
-               pr_err("isert_send_completion: Got ISER_IB_RECV\n");
+               isert_err("Got ISER_IB_RECV\n");
                dump_stack();
                break;
        case ISER_IB_SEND:
-               pr_debug("isert_send_completion: Got ISER_IB_SEND\n");
                isert_response_completion(tx_desc, isert_cmd,
                                          isert_conn, ib_dev);
                break;
        case ISER_IB_RDMA_WRITE:
-               pr_debug("isert_send_completion: Got ISER_IB_RDMA_WRITE\n");
-               atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
                isert_completion_rdma_write(tx_desc, isert_cmd);
                break;
        case ISER_IB_RDMA_READ:
-               pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
-
-               atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
                isert_completion_rdma_read(tx_desc, isert_cmd);
                break;
        default:
-               pr_err("Unknown wr->iser_ib_op: 0x%02x\n", wr->iser_ib_op);
+               isert_err("Unknown wr->iser_ib_op: 0x%x\n", wr->iser_ib_op);
                dump_stack();
                break;
        }
 }
 
-static void
-isert_send_completion(struct iser_tx_desc *tx_desc,
-                     struct isert_conn *isert_conn)
-{
-       struct llist_node *llnode = tx_desc->comp_llnode_batch;
-       struct iser_tx_desc *t;
-       /*
-        * Drain coalesced completion llist starting from comp_llnode_batch
-        * setup in isert_init_send_wr(), and then complete trailing tx_desc.
-        */
-       while (llnode) {
-               t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
-               llnode = llist_next(llnode);
-               __isert_send_completion(t, isert_conn);
-       }
-       __isert_send_completion(tx_desc, isert_conn);
-}
-
-static void
-isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
+/**
+ * is_isert_tx_desc() - Indicate if the completion wr_id
+ *     is a TX descriptor or not.
+ * @isert_conn: iser connection
+ * @wr_id: completion WR identifier
+ *
+ * Since we cannot rely on wc opcode in FLUSH errors
+ * we must work around it by checking if the wr_id address
+ * falls in the iser connection rx_descs buffer. If so
+ * it is an RX descriptor, otherwize it is a TX.
+ */
+static inline bool
+is_isert_tx_desc(struct isert_conn *isert_conn, void *wr_id)
 {
-       struct llist_node *llnode;
-       struct isert_rdma_wr *wr;
-       struct iser_tx_desc *t;
+       void *start = isert_conn->conn_rx_descs;
+       int len = ISERT_QP_MAX_RECV_DTOS * sizeof(*isert_conn->conn_rx_descs);
 
-       mutex_lock(&isert_conn->conn_mutex);
-       llnode = llist_del_all(&isert_conn->conn_comp_llist);
-       isert_conn->conn_comp_batch = 0;
-       mutex_unlock(&isert_conn->conn_mutex);
-
-       while (llnode) {
-               t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
-               llnode = llist_next(llnode);
-               wr = &t->isert_cmd->rdma_wr;
-
-               /**
-                * If send_wr_num is 0 this means that we got
-                * RDMA completion and we cleared it and we should
-                * simply decrement the response post. else the
-                * response is incorporated in send_wr_num, just
-                * sub it.
-                **/
-               if (wr->send_wr_num)
-                       atomic_sub(wr->send_wr_num,
-                                  &isert_conn->post_send_buf_count);
-               else
-                       atomic_dec(&isert_conn->post_send_buf_count);
+       if (wr_id >= start && wr_id < start + len)
+               return false;
 
-               isert_completion_put(t, t->isert_cmd, ib_dev, true);
-       }
+       return true;
 }
 
 static void
-isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
+isert_cq_comp_err(struct isert_conn *isert_conn, struct ib_wc *wc)
 {
-       struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
-       struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
-       struct llist_node *llnode = tx_desc->comp_llnode_batch;
-       struct isert_rdma_wr *wr;
-       struct iser_tx_desc *t;
-
-       while (llnode) {
-               t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
-               llnode = llist_next(llnode);
-               wr = &t->isert_cmd->rdma_wr;
+       if (wc->wr_id == ISER_BEACON_WRID) {
+               isert_info("conn %p completing conn_wait_comp_err\n",
+                          isert_conn);
+               complete(&isert_conn->conn_wait_comp_err);
+       } else if (is_isert_tx_desc(isert_conn, (void *)(uintptr_t)wc->wr_id)) {
+               struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+               struct isert_cmd *isert_cmd;
+               struct iser_tx_desc *desc;
 
-               /**
-                * If send_wr_num is 0 this means that we got
-                * RDMA completion and we cleared it and we should
-                * simply decrement the response post. else the
-                * response is incorporated in send_wr_num, just
-                * sub it.
-                **/
-               if (wr->send_wr_num)
-                       atomic_sub(wr->send_wr_num,
-                                  &isert_conn->post_send_buf_count);
+               desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
+               isert_cmd = desc->isert_cmd;
+               if (!isert_cmd)
+                       isert_unmap_tx_desc(desc, ib_dev);
                else
-                       atomic_dec(&isert_conn->post_send_buf_count);
-
-               isert_completion_put(t, t->isert_cmd, ib_dev, true);
-       }
-       tx_desc->comp_llnode_batch = NULL;
-
-       if (!isert_cmd)
-               isert_unmap_tx_desc(tx_desc, ib_dev);
-       else
-               isert_completion_put(tx_desc, isert_cmd, ib_dev, true);
-}
-
-static void
-isert_cq_rx_comp_err(struct isert_conn *isert_conn)
-{
-       struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
-       struct iscsi_conn *conn = isert_conn->conn;
-
-       if (isert_conn->post_recv_buf_count)
-               return;
-
-       isert_cq_drain_comp_llist(isert_conn, ib_dev);
-
-       if (conn->sess) {
-               target_sess_cmd_list_set_waiting(conn->sess->se_sess);
-               target_wait_for_sess_cmds(conn->sess->se_sess);
+                       isert_completion_put(desc, isert_cmd, ib_dev, true);
+       } else {
+               isert_conn->post_recv_buf_count--;
+               if (!isert_conn->post_recv_buf_count)
+                       iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
        }
-
-       while (atomic_read(&isert_conn->post_send_buf_count))
-               msleep(3000);
-
-       mutex_lock(&isert_conn->conn_mutex);
-       isert_conn->state = ISER_CONN_DOWN;
-       mutex_unlock(&isert_conn->conn_mutex);
-
-       iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
-
-       complete(&isert_conn->conn_wait_comp_err);
 }
 
 static void
-isert_cq_tx_work(struct work_struct *work)
+isert_handle_wc(struct ib_wc *wc)
 {
-       struct isert_cq_desc *cq_desc = container_of(work,
-                               struct isert_cq_desc, cq_tx_work);
-       struct isert_device *device = cq_desc->device;
-       int cq_index = cq_desc->cq_index;
-       struct ib_cq *tx_cq = device->dev_tx_cq[cq_index];
        struct isert_conn *isert_conn;
        struct iser_tx_desc *tx_desc;
-       struct ib_wc wc;
-
-       while (ib_poll_cq(tx_cq, 1, &wc) == 1) {
-               tx_desc = (struct iser_tx_desc *)(unsigned long)wc.wr_id;
-               isert_conn = wc.qp->qp_context;
+       struct iser_rx_desc *rx_desc;
 
-               if (wc.status == IB_WC_SUCCESS) {
-                       isert_send_completion(tx_desc, isert_conn);
+       isert_conn = wc->qp->qp_context;
+       if (likely(wc->status == IB_WC_SUCCESS)) {
+               if (wc->opcode == IB_WC_RECV) {
+                       rx_desc = (struct iser_rx_desc *)(uintptr_t)wc->wr_id;
+                       isert_rx_completion(rx_desc, isert_conn, wc->byte_len);
                } else {
-                       pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
-                       pr_debug("TX wc.status: 0x%08x\n", wc.status);
-                       pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
-
-                       if (wc.wr_id != ISER_FASTREG_LI_WRID) {
-                               if (tx_desc->llnode_active)
-                                       continue;
-
-                               atomic_dec(&isert_conn->post_send_buf_count);
-                               isert_cq_tx_comp_err(tx_desc, isert_conn);
-                       }
+                       tx_desc = (struct iser_tx_desc *)(uintptr_t)wc->wr_id;
+                       isert_send_completion(tx_desc, isert_conn);
                }
-       }
-
-       ib_req_notify_cq(tx_cq, IB_CQ_NEXT_COMP);
-}
-
-static void
-isert_cq_tx_callback(struct ib_cq *cq, void *context)
-{
-       struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
+       } else {
+               if (wc->status != IB_WC_WR_FLUSH_ERR)
+                       isert_err("wr id %llx status %d vend_err %x\n",
+                                 wc->wr_id, wc->status, wc->vendor_err);
+               else
+                       isert_dbg("flush error: wr id %llx\n", wc->wr_id);
 
-       queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
+               if (wc->wr_id != ISER_FASTREG_LI_WRID)
+                       isert_cq_comp_err(isert_conn, wc);
+       }
 }
 
 static void
-isert_cq_rx_work(struct work_struct *work)
+isert_cq_work(struct work_struct *work)
 {
-       struct isert_cq_desc *cq_desc = container_of(work,
-                       struct isert_cq_desc, cq_rx_work);
-       struct isert_device *device = cq_desc->device;
-       int cq_index = cq_desc->cq_index;
-       struct ib_cq *rx_cq = device->dev_rx_cq[cq_index];
-       struct isert_conn *isert_conn;
-       struct iser_rx_desc *rx_desc;
-       struct ib_wc wc;
-       unsigned long xfer_len;
+       enum { isert_poll_budget = 65536 };
+       struct isert_comp *comp = container_of(work, struct isert_comp,
+                                              work);
+       struct ib_wc *const wcs = comp->wcs;
+       int i, n, completed = 0;
 
-       while (ib_poll_cq(rx_cq, 1, &wc) == 1) {
-               rx_desc = (struct iser_rx_desc *)(unsigned long)wc.wr_id;
-               isert_conn = wc.qp->qp_context;
+       while ((n = ib_poll_cq(comp->cq, ARRAY_SIZE(comp->wcs), wcs)) > 0) {
+               for (i = 0; i < n; i++)
+                       isert_handle_wc(&wcs[i]);
 
-               if (wc.status == IB_WC_SUCCESS) {
-                       xfer_len = (unsigned long)wc.byte_len;
-                       isert_rx_completion(rx_desc, isert_conn, xfer_len);
-               } else {
-                       pr_debug("RX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
-                       if (wc.status != IB_WC_WR_FLUSH_ERR) {
-                               pr_debug("RX wc.status: 0x%08x\n", wc.status);
-                               pr_debug("RX wc.vendor_err: 0x%08x\n",
-                                        wc.vendor_err);
-                       }
-                       isert_conn->post_recv_buf_count--;
-                       isert_cq_rx_comp_err(isert_conn);
-               }
+               completed += n;
+               if (completed >= isert_poll_budget)
+                       break;
        }
 
-       ib_req_notify_cq(rx_cq, IB_CQ_NEXT_COMP);
+       ib_req_notify_cq(comp->cq, IB_CQ_NEXT_COMP);
 }
 
 static void
-isert_cq_rx_callback(struct ib_cq *cq, void *context)
+isert_cq_callback(struct ib_cq *cq, void *context)
 {
-       struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
+       struct isert_comp *comp = context;
 
-       queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
+       queue_work(isert_comp_wq, &comp->work);
 }
 
 static int
@@ -2147,13 +2057,10 @@ isert_post_response(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd)
        struct ib_send_wr *wr_failed;
        int ret;
 
-       atomic_inc(&isert_conn->post_send_buf_count);
-
        ret = ib_post_send(isert_conn->conn_qp, &isert_cmd->tx_desc.send_wr,
                           &wr_failed);
        if (ret) {
-               pr_err("ib_post_send failed with %d\n", ret);
-               atomic_dec(&isert_conn->post_send_buf_count);
+               isert_err("ib_post_send failed with %d\n", ret);
                return ret;
        }
        return ret;
@@ -2200,9 +2107,9 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
                isert_cmd->tx_desc.num_sge = 2;
        }
 
-       isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+       isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-       pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+       isert_dbg("Posting SCSI Response\n");
 
        return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2231,8 +2138,16 @@ isert_get_sup_prot_ops(struct iscsi_conn *conn)
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        struct isert_device *device = isert_conn->conn_device;
 
-       if (device->pi_capable)
-               return TARGET_PROT_ALL;
+       if (conn->tpg->tpg_attrib.t10_pi) {
+               if (device->pi_capable) {
+                       isert_info("conn %p PI offload enabled\n", isert_conn);
+                       isert_conn->pi_support = true;
+                       return TARGET_PROT_ALL;
+               }
+       }
+
+       isert_info("conn %p PI offload disabled\n", isert_conn);
+       isert_conn->pi_support = false;
 
        return TARGET_PROT_NORMAL;
 }
@@ -2250,9 +2165,9 @@ isert_put_nopin(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
                               &isert_cmd->tx_desc.iscsi_header,
                               nopout_response);
        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-       isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+       isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-       pr_debug("Posting NOPIN Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+       isert_dbg("conn %p Posting NOPIN Response\n", isert_conn);
 
        return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2268,9 +2183,9 @@ isert_put_logout_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        iscsit_build_logout_rsp(cmd, conn, (struct iscsi_logout_rsp *)
                                &isert_cmd->tx_desc.iscsi_header);
        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-       isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+       isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-       pr_debug("Posting Logout Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+       isert_dbg("conn %p Posting Logout Response\n", isert_conn);
 
        return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2286,9 +2201,9 @@ isert_put_tm_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        iscsit_build_task_mgt_rsp(cmd, conn, (struct iscsi_tm_rsp *)
                                  &isert_cmd->tx_desc.iscsi_header);
        isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
-       isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+       isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-       pr_debug("Posting Task Management Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+       isert_dbg("conn %p Posting Task Management Response\n", isert_conn);
 
        return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2318,9 +2233,9 @@ isert_put_reject(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
        tx_dsg->lkey    = isert_conn->conn_mr->lkey;
        isert_cmd->tx_desc.num_sge = 2;
 
-       isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+       isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-       pr_debug("Posting Reject IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+       isert_dbg("conn %p Posting Reject\n", isert_conn);
 
        return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2358,9 +2273,9 @@ isert_put_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn)
                tx_dsg->lkey    = isert_conn->conn_mr->lkey;
                isert_cmd->tx_desc.num_sge = 2;
        }
-       isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
+       isert_init_send_wr(isert_conn, isert_cmd, send_wr);
 
-       pr_debug("Posting Text Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
+       isert_dbg("conn %p Text Reject\n", isert_conn);
 
        return isert_post_response(isert_conn, isert_cmd);
 }
@@ -2383,30 +2298,31 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
 
        send_wr->sg_list = ib_sge;
        send_wr->num_sge = sg_nents;
-       send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+       send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
        /*
         * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
         */
        for_each_sg(sg_start, tmp_sg, sg_nents, i) {
-               pr_debug("ISER RDMA from SGL dma_addr: 0x%16llx dma_len: %u, page_off: %u\n",
-                        (unsigned long long)tmp_sg->dma_address,
-                        tmp_sg->length, page_off);
+               isert_dbg("RDMA from SGL dma_addr: 0x%llx dma_len: %u, "
+                         "page_off: %u\n",
+                         (unsigned long long)tmp_sg->dma_address,
+                         tmp_sg->length, page_off);
 
                ib_sge->addr = ib_sg_dma_address(ib_dev, tmp_sg) + page_off;
                ib_sge->length = min_t(u32, data_left,
                                ib_sg_dma_len(ib_dev, tmp_sg) - page_off);
                ib_sge->lkey = isert_conn->conn_mr->lkey;
 
-               pr_debug("RDMA ib_sge: addr: 0x%16llx  length: %u lkey: %08x\n",
-                        ib_sge->addr, ib_sge->length, ib_sge->lkey);
+               isert_dbg("RDMA ib_sge: addr: 0x%llx  length: %u lkey: %x\n",
+                         ib_sge->addr, ib_sge->length, ib_sge->lkey);
                page_off = 0;
                data_left -= ib_sge->length;
                ib_sge++;
-               pr_debug("Incrementing ib_sge pointer to %p\n", ib_sge);
+               isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
        }
 
-       pr_debug("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
-                send_wr->sg_list, send_wr->num_sge);
+       isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
+                 send_wr->sg_list, send_wr->num_sge);
 
        return sg_nents;
 }
@@ -2438,7 +2354,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 
        ib_sge = kzalloc(sizeof(struct ib_sge) * data->nents, GFP_KERNEL);
        if (!ib_sge) {
-               pr_warn("Unable to allocate ib_sge\n");
+               isert_warn("Unable to allocate ib_sge\n");
                ret = -ENOMEM;
                goto unmap_cmd;
        }
@@ -2448,7 +2364,7 @@ isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        wr->send_wr = kzalloc(sizeof(struct ib_send_wr) * wr->send_wr_num,
                                GFP_KERNEL);
        if (!wr->send_wr) {
-               pr_debug("Unable to allocate wr->send_wr\n");
+               isert_dbg("Unable to allocate wr->send_wr\n");
                ret = -ENOMEM;
                goto unmap_cmd;
        }
@@ -2512,9 +2428,9 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
                        chunk_start = start_addr;
                end_addr = start_addr + ib_sg_dma_len(ib_dev, tmp_sg);
 
-               pr_debug("SGL[%d] dma_addr: 0x%16llx len: %u\n",
-                        i, (unsigned long long)tmp_sg->dma_address,
-                        tmp_sg->length);
+               isert_dbg("SGL[%d] dma_addr: 0x%llx len: %u\n",
+                         i, (unsigned long long)tmp_sg->dma_address,
+                         tmp_sg->length);
 
                if ((end_addr & ~PAGE_MASK) && i < last_ent) {
                        new_chunk = 0;
@@ -2525,8 +2441,8 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
                page = chunk_start & PAGE_MASK;
                do {
                        fr_pl[n_pages++] = page;
-                       pr_debug("Mapped page_list[%d] page_addr: 0x%16llx\n",
-                                n_pages - 1, page);
+                       isert_dbg("Mapped page_list[%d] page_addr: 0x%llx\n",
+                                 n_pages - 1, page);
                        page += PAGE_SIZE;
                } while (page < end_addr);
        }
@@ -2534,6 +2450,21 @@ isert_map_fr_pagelist(struct ib_device *ib_dev,
        return n_pages;
 }
 
+static inline void
+isert_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr)
+{
+       u32 rkey;
+
+       memset(inv_wr, 0, sizeof(*inv_wr));
+       inv_wr->wr_id = ISER_FASTREG_LI_WRID;
+       inv_wr->opcode = IB_WR_LOCAL_INV;
+       inv_wr->ex.invalidate_rkey = mr->rkey;
+
+       /* Bump the key */
+       rkey = ib_inc_rkey(mr->rkey);
+       ib_update_fast_reg_key(mr, rkey);
+}
+
 static int
 isert_fast_reg_mr(struct isert_conn *isert_conn,
                  struct fast_reg_descriptor *fr_desc,
@@ -2548,15 +2479,13 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
        struct ib_send_wr *bad_wr, *wr = NULL;
        int ret, pagelist_len;
        u32 page_off;
-       u8 key;
 
        if (mem->dma_nents == 1) {
                sge->lkey = isert_conn->conn_mr->lkey;
                sge->addr = ib_sg_dma_address(ib_dev, &mem->sg[0]);
                sge->length = ib_sg_dma_len(ib_dev, &mem->sg[0]);
-               pr_debug("%s:%d sge: addr: 0x%llx  length: %u lkey: %x\n",
-                        __func__, __LINE__, sge->addr, sge->length,
-                        sge->lkey);
+               isert_dbg("sge: addr: 0x%llx  length: %u lkey: %x\n",
+                        sge->addr, sge->length, sge->lkey);
                return 0;
        }
 
@@ -2572,21 +2501,15 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
 
        page_off = mem->offset % PAGE_SIZE;
 
-       pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
-                fr_desc, mem->nents, mem->offset);
+       isert_dbg("Use fr_desc %p sg_nents %d offset %u\n",
+                 fr_desc, mem->nents, mem->offset);
 
        pagelist_len = isert_map_fr_pagelist(ib_dev, mem->sg, mem->nents,
                                             &frpl->page_list[0]);
 
-       if (!(fr_desc->ind & ISERT_DATA_KEY_VALID)) {
-               memset(&inv_wr, 0, sizeof(inv_wr));
-               inv_wr.wr_id = ISER_FASTREG_LI_WRID;
-               inv_wr.opcode = IB_WR_LOCAL_INV;
-               inv_wr.ex.invalidate_rkey = mr->rkey;
+       if (!(fr_desc->ind & ind)) {
+               isert_inv_rkey(&inv_wr, mr);
                wr = &inv_wr;
-               /* Bump the key */
-               key = (u8)(mr->rkey & 0x000000FF);
-               ib_update_fast_reg_key(mr, ++key);
        }
 
        /* Prepare FASTREG WR */
@@ -2608,7 +2531,7 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
 
        ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
        if (ret) {
-               pr_err("fast registration failed, ret:%d\n", ret);
+               isert_err("fast registration failed, ret:%d\n", ret);
                return ret;
        }
        fr_desc->ind &= ~ind;
@@ -2617,9 +2540,8 @@ isert_fast_reg_mr(struct isert_conn *isert_conn,
        sge->addr = frpl->page_list[0] + page_off;
        sge->length = mem->len;
 
-       pr_debug("%s:%d sge: addr: 0x%llx  length: %u lkey: %x\n",
-                __func__, __LINE__, sge->addr, sge->length,
-                sge->lkey);
+       isert_dbg("sge: addr: 0x%llx  length: %u lkey: %x\n",
+                 sge->addr, sge->length, sge->lkey);
 
        return ret;
 }
@@ -2665,7 +2587,7 @@ isert_set_sig_attrs(struct se_cmd *se_cmd, struct ib_sig_attrs *sig_attrs)
                isert_set_dif_domain(se_cmd, sig_attrs, &sig_attrs->mem);
                break;
        default:
-               pr_err("Unsupported PI operation %d\n", se_cmd->prot_op);
+               isert_err("Unsupported PI operation %d\n", se_cmd->prot_op);
                return -EINVAL;
        }
 
@@ -2681,17 +2603,16 @@ isert_set_prot_checks(u8 prot_checks)
 }
 
 static int
-isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
-                struct fast_reg_descriptor *fr_desc,
-                struct ib_sge *data_sge, struct ib_sge *prot_sge,
-                struct ib_sge *sig_sge)
+isert_reg_sig_mr(struct isert_conn *isert_conn,
+                struct se_cmd *se_cmd,
+                struct isert_rdma_wr *rdma_wr,
+                struct fast_reg_descriptor *fr_desc)
 {
        struct ib_send_wr sig_wr, inv_wr;
        struct ib_send_wr *bad_wr, *wr = NULL;
        struct pi_context *pi_ctx = fr_desc->pi_ctx;
        struct ib_sig_attrs sig_attrs;
        int ret;
-       u32 key;
 
        memset(&sig_attrs, 0, sizeof(sig_attrs));
        ret = isert_set_sig_attrs(se_cmd, &sig_attrs);
@@ -2701,26 +2622,20 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
        sig_attrs.check_mask = isert_set_prot_checks(se_cmd->prot_checks);
 
        if (!(fr_desc->ind & ISERT_SIG_KEY_VALID)) {
-               memset(&inv_wr, 0, sizeof(inv_wr));
-               inv_wr.opcode = IB_WR_LOCAL_INV;
-               inv_wr.wr_id = ISER_FASTREG_LI_WRID;
-               inv_wr.ex.invalidate_rkey = pi_ctx->sig_mr->rkey;
+               isert_inv_rkey(&inv_wr, pi_ctx->sig_mr);
                wr = &inv_wr;
-               /* Bump the key */
-               key = (u8)(pi_ctx->sig_mr->rkey & 0x000000FF);
-               ib_update_fast_reg_key(pi_ctx->sig_mr, ++key);
        }
 
        memset(&sig_wr, 0, sizeof(sig_wr));
        sig_wr.opcode = IB_WR_REG_SIG_MR;
        sig_wr.wr_id = ISER_FASTREG_LI_WRID;
-       sig_wr.sg_list = data_sge;
+       sig_wr.sg_list = &rdma_wr->ib_sg[DATA];
        sig_wr.num_sge = 1;
        sig_wr.wr.sig_handover.access_flags = IB_ACCESS_LOCAL_WRITE;
        sig_wr.wr.sig_handover.sig_attrs = &sig_attrs;
        sig_wr.wr.sig_handover.sig_mr = pi_ctx->sig_mr;
        if (se_cmd->t_prot_sg)
-               sig_wr.wr.sig_handover.prot = prot_sge;
+               sig_wr.wr.sig_handover.prot = &rdma_wr->ib_sg[PROT];
 
        if (!wr)
                wr = &sig_wr;
@@ -2729,29 +2644,88 @@ isert_reg_sig_mr(struct isert_conn *isert_conn, struct se_cmd *se_cmd,
 
        ret = ib_post_send(isert_conn->conn_qp, wr, &bad_wr);
        if (ret) {
-               pr_err("fast registration failed, ret:%d\n", ret);
+               isert_err("fast registration failed, ret:%d\n", ret);
                goto err;
        }
        fr_desc->ind &= ~ISERT_SIG_KEY_VALID;
 
-       sig_sge->lkey = pi_ctx->sig_mr->lkey;
-       sig_sge->addr = 0;
-       sig_sge->length = se_cmd->data_length;
+       rdma_wr->ib_sg[SIG].lkey = pi_ctx->sig_mr->lkey;
+       rdma_wr->ib_sg[SIG].addr = 0;
+       rdma_wr->ib_sg[SIG].length = se_cmd->data_length;
        if (se_cmd->prot_op != TARGET_PROT_DIN_STRIP &&
            se_cmd->prot_op != TARGET_PROT_DOUT_INSERT)
                /*
                 * We have protection guards on the wire
                 * so we need to set a larget transfer
                 */
-               sig_sge->length += se_cmd->prot_length;
+               rdma_wr->ib_sg[SIG].length += se_cmd->prot_length;
 
-       pr_debug("sig_sge: addr: 0x%llx  length: %u lkey: %x\n",
-                sig_sge->addr, sig_sge->length,
-                sig_sge->lkey);
+       isert_dbg("sig_sge: addr: 0x%llx  length: %u lkey: %x\n",
+                 rdma_wr->ib_sg[SIG].addr, rdma_wr->ib_sg[SIG].length,
+                 rdma_wr->ib_sg[SIG].lkey);
 err:
        return ret;
 }
 
+static int
+isert_handle_prot_cmd(struct isert_conn *isert_conn,
+                     struct isert_cmd *isert_cmd,
+                     struct isert_rdma_wr *wr)
+{
+       struct isert_device *device = isert_conn->conn_device;
+       struct se_cmd *se_cmd = &isert_cmd->iscsi_cmd->se_cmd;
+       int ret;
+
+       if (!wr->fr_desc->pi_ctx) {
+               ret = isert_create_pi_ctx(wr->fr_desc,
+                                         device->ib_device,
+                                         isert_conn->conn_pd);
+               if (ret) {
+                       isert_err("conn %p failed to allocate pi_ctx\n",
+                                 isert_conn);
+                       return ret;
+               }
+       }
+
+       if (se_cmd->t_prot_sg) {
+               ret = isert_map_data_buf(isert_conn, isert_cmd,
+                                        se_cmd->t_prot_sg,
+                                        se_cmd->t_prot_nents,
+                                        se_cmd->prot_length,
+                                        0, wr->iser_ib_op, &wr->prot);
+               if (ret) {
+                       isert_err("conn %p failed to map protection buffer\n",
+                                 isert_conn);
+                       return ret;
+               }
+
+               memset(&wr->ib_sg[PROT], 0, sizeof(wr->ib_sg[PROT]));
+               ret = isert_fast_reg_mr(isert_conn, wr->fr_desc, &wr->prot,
+                                       ISERT_PROT_KEY_VALID, &wr->ib_sg[PROT]);
+               if (ret) {
+                       isert_err("conn %p failed to fast reg mr\n",
+                                 isert_conn);
+                       goto unmap_prot_cmd;
+               }
+       }
+
+       ret = isert_reg_sig_mr(isert_conn, se_cmd, wr, wr->fr_desc);
+       if (ret) {
+               isert_err("conn %p failed to fast reg mr\n",
+                         isert_conn);
+               goto unmap_prot_cmd;
+       }
+       wr->fr_desc->ind |= ISERT_PROTECTED;
+
+       return 0;
+
+unmap_prot_cmd:
+       if (se_cmd->t_prot_sg)
+               isert_unmap_data_buf(isert_conn, &wr->prot);
+
+       return ret;
+}
+
 static int
 isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
               struct isert_rdma_wr *wr)
@@ -2759,9 +2733,9 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        struct isert_conn *isert_conn = conn->context;
-       struct ib_sge data_sge;
-       struct ib_send_wr *send_wr;
        struct fast_reg_descriptor *fr_desc = NULL;
+       struct ib_send_wr *send_wr;
+       struct ib_sge *ib_sg;
        u32 offset;
        int ret = 0;
        unsigned long flags;
@@ -2775,8 +2749,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        if (ret)
                return ret;
 
-       if (wr->data.dma_nents != 1 ||
-           se_cmd->prot_op != TARGET_PROT_NORMAL) {
+       if (wr->data.dma_nents != 1 || isert_prot_cmd(isert_conn, se_cmd)) {
                spin_lock_irqsave(&isert_conn->conn_lock, flags);
                fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
                                           struct fast_reg_descriptor, list);
@@ -2786,38 +2759,21 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        }
 
        ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->data,
-                               ISERT_DATA_KEY_VALID, &data_sge);
+                               ISERT_DATA_KEY_VALID, &wr->ib_sg[DATA]);
        if (ret)
                goto unmap_cmd;
 
-       if (se_cmd->prot_op != TARGET_PROT_NORMAL) {
-               struct ib_sge prot_sge, sig_sge;
-
-               if (se_cmd->t_prot_sg) {
-                       ret = isert_map_data_buf(isert_conn, isert_cmd,
-                                                se_cmd->t_prot_sg,
-                                                se_cmd->t_prot_nents,
-                                                se_cmd->prot_length,
-                                                0, wr->iser_ib_op, &wr->prot);
-                       if (ret)
-                               goto unmap_cmd;
-
-                       ret = isert_fast_reg_mr(isert_conn, fr_desc, &wr->prot,
-                                               ISERT_PROT_KEY_VALID, &prot_sge);
-                       if (ret)
-                               goto unmap_prot_cmd;
-               }
-
-               ret = isert_reg_sig_mr(isert_conn, se_cmd, fr_desc,
-                                      &data_sge, &prot_sge, &sig_sge);
+       if (isert_prot_cmd(isert_conn, se_cmd)) {
+               ret = isert_handle_prot_cmd(isert_conn, isert_cmd, wr);
                if (ret)
-                       goto unmap_prot_cmd;
+                       goto unmap_cmd;
 
-               fr_desc->ind |= ISERT_PROTECTED;
-               memcpy(&wr->s_ib_sge, &sig_sge, sizeof(sig_sge));
-       } else
-               memcpy(&wr->s_ib_sge, &data_sge, sizeof(data_sge));
+               ib_sg = &wr->ib_sg[SIG];
+       } else {
+               ib_sg = &wr->ib_sg[DATA];
+       }
 
+       memcpy(&wr->s_ib_sge, ib_sg, sizeof(*ib_sg));
        wr->ib_sge = &wr->s_ib_sge;
        wr->send_wr_num = 1;
        memset(&wr->s_send_wr, 0, sizeof(*send_wr));
@@ -2827,12 +2783,12 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        send_wr = &isert_cmd->rdma_wr.s_send_wr;
        send_wr->sg_list = &wr->s_ib_sge;
        send_wr->num_sge = 1;
-       send_wr->wr_id = (unsigned long)&isert_cmd->tx_desc;
+       send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
        if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
                send_wr->opcode = IB_WR_RDMA_WRITE;
                send_wr->wr.rdma.remote_addr = isert_cmd->read_va;
                send_wr->wr.rdma.rkey = isert_cmd->read_stag;
-               send_wr->send_flags = se_cmd->prot_op == TARGET_PROT_NORMAL ?
+               send_wr->send_flags = !isert_prot_cmd(isert_conn, se_cmd) ?
                                      0 : IB_SEND_SIGNALED;
        } else {
                send_wr->opcode = IB_WR_RDMA_READ;
@@ -2842,9 +2798,7 @@ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
        }
 
        return 0;
-unmap_prot_cmd:
-       if (se_cmd->t_prot_sg)
-               isert_unmap_data_buf(isert_conn, &wr->prot);
+
 unmap_cmd:
        if (fr_desc) {
                spin_lock_irqsave(&isert_conn->conn_lock, flags);
@@ -2867,16 +2821,17 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
        struct ib_send_wr *wr_failed;
        int rc;
 
-       pr_debug("Cmd: %p RDMA_WRITE data_length: %u\n",
+       isert_dbg("Cmd: %p RDMA_WRITE data_length: %u\n",
                 isert_cmd, se_cmd->data_length);
+
        wr->iser_ib_op = ISER_IB_RDMA_WRITE;
        rc = device->reg_rdma_mem(conn, cmd, wr);
        if (rc) {
-               pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+               isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
                return rc;
        }
 
-       if (se_cmd->prot_op == TARGET_PROT_NORMAL) {
+       if (!isert_prot_cmd(isert_conn, se_cmd)) {
                /*
                 * Build isert_conn->tx_desc for iSCSI response PDU and attach
                 */
@@ -2886,24 +2841,20 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
                                     &isert_cmd->tx_desc.iscsi_header);
                isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
                isert_init_send_wr(isert_conn, isert_cmd,
-                                  &isert_cmd->tx_desc.send_wr, false);
+                                  &isert_cmd->tx_desc.send_wr);
                isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
                wr->send_wr_num += 1;
        }
 
-       atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
-
        rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
-       if (rc) {
-               pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
-               atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
-       }
+       if (rc)
+               isert_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
 
-       if (se_cmd->prot_op == TARGET_PROT_NORMAL)
-               pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
+       if (!isert_prot_cmd(isert_conn, se_cmd))
+               isert_dbg("Cmd: %p posted RDMA_WRITE + Response for iSER Data "
                         "READ\n", isert_cmd);
        else
-               pr_debug("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
+               isert_dbg("Cmd: %p posted RDMA_WRITE for iSER Data READ\n",
                         isert_cmd);
 
        return 1;
@@ -2920,23 +2871,20 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
        struct ib_send_wr *wr_failed;
        int rc;
 
-       pr_debug("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
+       isert_dbg("Cmd: %p RDMA_READ data_length: %u write_data_done: %u\n",
                 isert_cmd, se_cmd->data_length, cmd->write_data_done);
        wr->iser_ib_op = ISER_IB_RDMA_READ;
        rc = device->reg_rdma_mem(conn, cmd, wr);
        if (rc) {
-               pr_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
+               isert_err("Cmd: %p failed to prepare RDMA res\n", isert_cmd);
                return rc;
        }
 
-       atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
-
        rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
-       if (rc) {
-               pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
-               atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
-       }
-       pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
+       if (rc)
+               isert_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
+
+       isert_dbg("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
                 isert_cmd);
 
        return 0;
@@ -2952,7 +2900,7 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
                ret = isert_put_nopin(cmd, conn, false);
                break;
        default:
-               pr_err("Unknown immediate state: 0x%02x\n", state);
+               isert_err("Unknown immediate state: 0x%02x\n", state);
                ret = -EINVAL;
                break;
        }
@@ -2963,15 +2911,14 @@ isert_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 static int
 isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
 {
+       struct isert_conn *isert_conn = conn->context;
        int ret;
 
        switch (state) {
        case ISTATE_SEND_LOGOUTRSP:
                ret = isert_put_logout_rsp(cmd, conn);
-               if (!ret) {
-                       pr_debug("Returning iSER Logout -EAGAIN\n");
-                       ret = -EAGAIN;
-               }
+               if (!ret)
+                       isert_conn->logout_posted = true;
                break;
        case ISTATE_SEND_NOPIN:
                ret = isert_put_nopin(cmd, conn, true);
@@ -2993,7 +2940,7 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
                ret = isert_put_response(conn, cmd);
                break;
        default:
-               pr_err("Unknown response state: 0x%02x\n", state);
+               isert_err("Unknown response state: 0x%02x\n", state);
                ret = -EINVAL;
                break;
        }
@@ -3001,27 +2948,64 @@ isert_response_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state)
        return ret;
 }
 
+struct rdma_cm_id *
+isert_setup_id(struct isert_np *isert_np)
+{
+       struct iscsi_np *np = isert_np->np;
+       struct rdma_cm_id *id;
+       struct sockaddr *sa;
+       int ret;
+
+       sa = (struct sockaddr *)&np->np_sockaddr;
+       isert_dbg("ksockaddr: %p, sa: %p\n", &np->np_sockaddr, sa);
+
+       id = rdma_create_id(isert_cma_handler, isert_np,
+                           RDMA_PS_TCP, IB_QPT_RC);
+       if (IS_ERR(id)) {
+               isert_err("rdma_create_id() failed: %ld\n", PTR_ERR(id));
+               ret = PTR_ERR(id);
+               goto out;
+       }
+       isert_dbg("id %p context %p\n", id, id->context);
+
+       ret = rdma_bind_addr(id, sa);
+       if (ret) {
+               isert_err("rdma_bind_addr() failed: %d\n", ret);
+               goto out_id;
+       }
+
+       ret = rdma_listen(id, ISERT_RDMA_LISTEN_BACKLOG);
+       if (ret) {
+               isert_err("rdma_listen() failed: %d\n", ret);
+               goto out_id;
+       }
+
+       return id;
+out_id:
+       rdma_destroy_id(id);
+out:
+       return ERR_PTR(ret);
+}
+
 static int
 isert_setup_np(struct iscsi_np *np,
               struct __kernel_sockaddr_storage *ksockaddr)
 {
        struct isert_np *isert_np;
        struct rdma_cm_id *isert_lid;
-       struct sockaddr *sa;
        int ret;
 
        isert_np = kzalloc(sizeof(struct isert_np), GFP_KERNEL);
        if (!isert_np) {
-               pr_err("Unable to allocate struct isert_np\n");
+               isert_err("Unable to allocate struct isert_np\n");
                return -ENOMEM;
        }
        sema_init(&isert_np->np_sem, 0);
        mutex_init(&isert_np->np_accept_mutex);
        INIT_LIST_HEAD(&isert_np->np_accept_list);
        init_completion(&isert_np->np_login_comp);
+       isert_np->np = np;
 
-       sa = (struct sockaddr *)ksockaddr;
-       pr_debug("ksockaddr: %p, sa: %p\n", ksockaddr, sa);
        /*
         * Setup the np->np_sockaddr from the passed sockaddr setup
         * in iscsi_target_configfs.c code..
@@ -3029,37 +3013,20 @@ isert_setup_np(struct iscsi_np *np,
        memcpy(&np->np_sockaddr, ksockaddr,
               sizeof(struct __kernel_sockaddr_storage));
 
-       isert_lid = rdma_create_id(isert_cma_handler, np, RDMA_PS_TCP,
-                               IB_QPT_RC);
+       isert_lid = isert_setup_id(isert_np);
        if (IS_ERR(isert_lid)) {
-               pr_err("rdma_create_id() for isert_listen_handler failed: %ld\n",
-                      PTR_ERR(isert_lid));
                ret = PTR_ERR(isert_lid);
                goto out;
        }
 
-       ret = rdma_bind_addr(isert_lid, sa);
-       if (ret) {
-               pr_err("rdma_bind_addr() for isert_lid failed: %d\n", ret);
-               goto out_lid;
-       }
-
-       ret = rdma_listen(isert_lid, ISERT_RDMA_LISTEN_BACKLOG);
-       if (ret) {
-               pr_err("rdma_listen() for isert_lid failed: %d\n", ret);
-               goto out_lid;
-       }
-
        isert_np->np_cm_id = isert_lid;
        np->np_context = isert_np;
-       pr_debug("Setup isert_lid->context: %p\n", isert_lid->context);
 
        return 0;
 
-out_lid:
-       rdma_destroy_id(isert_lid);
 out:
        kfree(isert_np);
+
        return ret;
 }
 
@@ -3075,16 +3042,12 @@ isert_rdma_accept(struct isert_conn *isert_conn)
        cp.retry_count = 7;
        cp.rnr_retry_count = 7;
 
-       pr_debug("Before rdma_accept >>>>>>>>>>>>>>>>>>>>.\n");
-
        ret = rdma_accept(cm_id, &cp);
        if (ret) {
-               pr_err("rdma_accept() failed with: %d\n", ret);
+               isert_err("rdma_accept() failed with: %d\n", ret);
                return ret;
        }
 
-       pr_debug("After rdma_accept >>>>>>>>>>>>>>>>>>>>>.\n");
-
        return 0;
 }
 
@@ -3094,7 +3057,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        int ret;
 
-       pr_debug("isert_get_login_rx before conn_login_comp conn: %p\n", conn);
+       isert_info("before login_req comp conn: %p\n", isert_conn);
+       ret = wait_for_completion_interruptible(&isert_conn->login_req_comp);
+       if (ret) {
+               isert_err("isert_conn %p interrupted before got login req\n",
+                         isert_conn);
+               return ret;
+       }
+       reinit_completion(&isert_conn->login_req_comp);
+
        /*
         * For login requests after the first PDU, isert_rx_login_req() will
         * kick schedule_delayed_work(&conn->login_work) as the packet is
@@ -3104,11 +3075,15 @@ isert_get_login_rx(struct iscsi_conn *conn, struct iscsi_login *login)
        if (!login->first_request)
                return 0;
 
+       isert_rx_login_req(isert_conn);
+
+       isert_info("before conn_login_comp conn: %p\n", conn);
        ret = wait_for_completion_interruptible(&isert_conn->conn_login_comp);
        if (ret)
                return ret;
 
-       pr_debug("isert_get_login_rx processing login->req: %p\n", login->req);
+       isert_info("processing login->req: %p\n", login->req);
+
        return 0;
 }
 
@@ -3161,7 +3136,7 @@ accept_wait:
        spin_lock_bh(&np->np_thread_lock);
        if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
-               pr_debug("np_thread_state %d for isert_accept_np\n",
+               isert_dbg("np_thread_state %d for isert_accept_np\n",
                         np->np_thread_state);
                /**
                 * No point in stalling here when np_thread
@@ -3186,17 +3161,10 @@ accept_wait:
        isert_conn->conn = conn;
        max_accept = 0;
 
-       ret = isert_rdma_post_recvl(isert_conn);
-       if (ret)
-               return ret;
-
-       ret = isert_rdma_accept(isert_conn);
-       if (ret)
-               return ret;
-
        isert_set_conn_info(np, conn, isert_conn);
 
-       pr_debug("Processing isert_accept_np: isert_conn: %p\n", isert_conn);
+       isert_dbg("Processing isert_conn: %p\n", isert_conn);
+
        return 0;
 }
 
@@ -3204,25 +3172,103 @@ static void
 isert_free_np(struct iscsi_np *np)
 {
        struct isert_np *isert_np = (struct isert_np *)np->np_context;
+       struct isert_conn *isert_conn, *n;
 
        if (isert_np->np_cm_id)
                rdma_destroy_id(isert_np->np_cm_id);
 
+       /*
+        * FIXME: At this point we don't have a good way to ensure
+        * that we don't have hanging connections that completed
+        * RDMA establishment but didn't start the iSCSI login
+        * process. So work around this by cleaning up whatever
+        * piled up in np_accept_list.
+        */
+       mutex_lock(&isert_np->np_accept_mutex);
+       if (!list_empty(&isert_np->np_accept_list)) {
+               isert_info("Still have isert connections, cleaning up...\n");
+               list_for_each_entry_safe(isert_conn, n,
+                                        &isert_np->np_accept_list,
+                                        conn_accept_node) {
+                       isert_info("cleaning isert_conn %p state (%d)\n",
+                                  isert_conn, isert_conn->state);
+                       isert_connect_release(isert_conn);
+               }
+       }
+       mutex_unlock(&isert_np->np_accept_mutex);
+
        np->np_context = NULL;
        kfree(isert_np);
 }
 
+static void isert_release_work(struct work_struct *work)
+{
+       struct isert_conn *isert_conn = container_of(work,
+                                                    struct isert_conn,
+                                                    release_work);
+
+       isert_info("Starting release conn %p\n", isert_conn);
+
+       wait_for_completion(&isert_conn->conn_wait);
+
+       mutex_lock(&isert_conn->conn_mutex);
+       isert_conn->state = ISER_CONN_DOWN;
+       mutex_unlock(&isert_conn->conn_mutex);
+
+       isert_info("Destroying conn %p\n", isert_conn);
+       isert_put_conn(isert_conn);
+}
+
+static void
+isert_wait4logout(struct isert_conn *isert_conn)
+{
+       struct iscsi_conn *conn = isert_conn->conn;
+
+       isert_info("conn %p\n", isert_conn);
+
+       if (isert_conn->logout_posted) {
+               isert_info("conn %p wait for conn_logout_comp\n", isert_conn);
+               wait_for_completion_timeout(&conn->conn_logout_comp,
+                                           SECONDS_FOR_LOGOUT_COMP * HZ);
+       }
+}
+
+static void
+isert_wait4cmds(struct iscsi_conn *conn)
+{
+       isert_info("iscsi_conn %p\n", conn);
+
+       if (conn->sess) {
+               target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+               target_wait_for_sess_cmds(conn->sess->se_sess);
+       }
+}
+
+static void
+isert_wait4flush(struct isert_conn *isert_conn)
+{
+       struct ib_recv_wr *bad_wr;
+
+       isert_info("conn %p\n", isert_conn);
+
+       init_completion(&isert_conn->conn_wait_comp_err);
+       isert_conn->beacon.wr_id = ISER_BEACON_WRID;
+       /* post an indication that all flush errors were consumed */
+       if (ib_post_recv(isert_conn->conn_qp, &isert_conn->beacon, &bad_wr)) {
+               isert_err("conn %p failed to post beacon", isert_conn);
+               return;
+       }
+
+       wait_for_completion(&isert_conn->conn_wait_comp_err);
+}
+
 static void isert_wait_conn(struct iscsi_conn *conn)
 {
        struct isert_conn *isert_conn = conn->context;
 
-       pr_debug("isert_wait_conn: Starting \n");
+       isert_info("Starting conn %p\n", isert_conn);
 
        mutex_lock(&isert_conn->conn_mutex);
-       if (isert_conn->conn_cm_id && !isert_conn->disconnect) {
-               pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
-               rdma_disconnect(isert_conn->conn_cm_id);
-       }
        /*
         * Only wait for conn_wait_comp_err if the isert_conn made it
         * into full feature phase..
@@ -3231,14 +3277,15 @@ static void isert_wait_conn(struct iscsi_conn *conn)
                mutex_unlock(&isert_conn->conn_mutex);
                return;
        }
-       if (isert_conn->state == ISER_CONN_UP)
-               isert_conn->state = ISER_CONN_TERMINATING;
+       isert_conn_terminate(isert_conn);
        mutex_unlock(&isert_conn->conn_mutex);
 
-       wait_for_completion(&isert_conn->conn_wait_comp_err);
+       isert_wait4cmds(conn);
+       isert_wait4flush(isert_conn);
+       isert_wait4logout(isert_conn);
 
-       wait_for_completion(&isert_conn->conn_wait);
-       isert_put_conn(isert_conn);
+       INIT_WORK(&isert_conn->release_work, isert_release_work);
+       queue_work(isert_release_wq, &isert_conn->release_work);
 }
 
 static void isert_free_conn(struct iscsi_conn *conn)
@@ -3273,35 +3320,39 @@ static int __init isert_init(void)
 {
        int ret;
 
-       isert_rx_wq = alloc_workqueue("isert_rx_wq", 0, 0);
-       if (!isert_rx_wq) {
-               pr_err("Unable to allocate isert_rx_wq\n");
+       isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
+       if (!isert_comp_wq) {
+               isert_err("Unable to allocate isert_comp_wq\n");
+               ret = -ENOMEM;
                return -ENOMEM;
        }
 
-       isert_comp_wq = alloc_workqueue("isert_comp_wq", 0, 0);
-       if (!isert_comp_wq) {
-               pr_err("Unable to allocate isert_comp_wq\n");
+       isert_release_wq = alloc_workqueue("isert_release_wq", WQ_UNBOUND,
+                                       WQ_UNBOUND_MAX_ACTIVE);
+       if (!isert_release_wq) {
+               isert_err("Unable to allocate isert_release_wq\n");
                ret = -ENOMEM;
-               goto destroy_rx_wq;
+               goto destroy_comp_wq;
        }
 
        iscsit_register_transport(&iser_target_transport);
-       pr_debug("iSER_TARGET[0] - Loaded iser_target_transport\n");
+       isert_info("iSER_TARGET[0] - Loaded iser_target_transport\n");
+
        return 0;
 
-destroy_rx_wq:
-       destroy_workqueue(isert_rx_wq);
+destroy_comp_wq:
+       destroy_workqueue(isert_comp_wq);
+
        return ret;
 }
 
 static void __exit isert_exit(void)
 {
        flush_scheduled_work();
+       destroy_workqueue(isert_release_wq);
        destroy_workqueue(isert_comp_wq);
-       destroy_workqueue(isert_rx_wq);
        iscsit_unregister_transport(&iser_target_transport);
-       pr_debug("iSER_TARGET[0] - Released iser_target_transport\n");
+       isert_info("iSER_TARGET[0] - Released iser_target_transport\n");
 }
 
 MODULE_DESCRIPTION("iSER-Target for mainline target infrastructure");
index 04f51f7bf614735b47d782b2ffa97d417e25fcd5..8dc8415d152d00ca550f9cfa993d0da0bacb27c2 100644 (file)
@@ -4,9 +4,37 @@
 #include <rdma/ib_verbs.h>
 #include <rdma/rdma_cm.h>
 
+#define DRV_NAME       "isert"
+#define PFX            DRV_NAME ": "
+
+#define isert_dbg(fmt, arg...)                          \
+       do {                                             \
+               if (unlikely(isert_debug_level > 2))     \
+                       printk(KERN_DEBUG PFX "%s: " fmt,\
+                               __func__ , ## arg);      \
+       } while (0)
+
+#define isert_warn(fmt, arg...)                                \
+       do {                                            \
+               if (unlikely(isert_debug_level > 0))    \
+                       pr_warn(PFX "%s: " fmt,         \
+                               __func__ , ## arg);     \
+       } while (0)
+
+#define isert_info(fmt, arg...)                                \
+       do {                                            \
+               if (unlikely(isert_debug_level > 1))    \
+                       pr_info(PFX "%s: " fmt,         \
+                               __func__ , ## arg);     \
+       } while (0)
+
+#define isert_err(fmt, arg...) \
+       pr_err(PFX "%s: " fmt, __func__ , ## arg)
+
 #define ISERT_RDMA_LISTEN_BACKLOG      10
 #define ISCSI_ISER_SG_TABLESIZE                256
 #define ISER_FASTREG_LI_WRID           0xffffffffffffffffULL
+#define ISER_BEACON_WRID               0xfffffffffffffffeULL
 
 enum isert_desc_type {
        ISCSI_TX_CONTROL,
@@ -23,6 +51,7 @@ enum iser_ib_op_code {
 enum iser_conn_state {
        ISER_CONN_INIT,
        ISER_CONN_UP,
+       ISER_CONN_FULL_FEATURE,
        ISER_CONN_TERMINATING,
        ISER_CONN_DOWN,
 };
@@ -44,9 +73,6 @@ struct iser_tx_desc {
        struct ib_sge   tx_sg[2];
        int             num_sge;
        struct isert_cmd *isert_cmd;
-       struct llist_node *comp_llnode_batch;
-       struct llist_node comp_llnode;
-       bool            llnode_active;
        struct ib_send_wr send_wr;
 } __packed;
 
@@ -81,6 +107,12 @@ struct isert_data_buf {
        enum dma_data_direction dma_dir;
 };
 
+enum {
+       DATA = 0,
+       PROT = 1,
+       SIG = 2,
+};
+
 struct isert_rdma_wr {
        struct list_head        wr_list;
        struct isert_cmd        *isert_cmd;
@@ -90,6 +122,7 @@ struct isert_rdma_wr {
        int                     send_wr_num;
        struct ib_send_wr       *send_wr;
        struct ib_send_wr       s_send_wr;
+       struct ib_sge           ib_sg[3];
        struct isert_data_buf   data;
        struct isert_data_buf   prot;
        struct fast_reg_descriptor *fr_desc;
@@ -117,14 +150,15 @@ struct isert_device;
 struct isert_conn {
        enum iser_conn_state    state;
        int                     post_recv_buf_count;
-       atomic_t                post_send_buf_count;
        u32                     responder_resources;
        u32                     initiator_depth;
+       bool                    pi_support;
        u32                     max_sge;
        char                    *login_buf;
        char                    *login_req_buf;
        char                    *login_rsp_buf;
        u64                     login_req_dma;
+       int                     login_req_len;
        u64                     login_rsp_dma;
        unsigned int            conn_rx_desc_head;
        struct iser_rx_desc     *conn_rx_descs;
@@ -132,13 +166,13 @@ struct isert_conn {
        struct iscsi_conn       *conn;
        struct list_head        conn_accept_node;
        struct completion       conn_login_comp;
+       struct completion       login_req_comp;
        struct iser_tx_desc     conn_login_tx_desc;
        struct rdma_cm_id       *conn_cm_id;
        struct ib_pd            *conn_pd;
        struct ib_mr            *conn_mr;
        struct ib_qp            *conn_qp;
        struct isert_device     *conn_device;
-       struct work_struct      conn_logout_work;
        struct mutex            conn_mutex;
        struct completion       conn_wait;
        struct completion       conn_wait_comp_err;
@@ -147,31 +181,38 @@ struct isert_conn {
        int                     conn_fr_pool_size;
        /* lock to protect fastreg pool */
        spinlock_t              conn_lock;
-#define ISERT_COMP_BATCH_COUNT 8
-       int                     conn_comp_batch;
-       struct llist_head       conn_comp_llist;
-       bool                    disconnect;
+       struct work_struct      release_work;
+       struct ib_recv_wr       beacon;
+       bool                    logout_posted;
 };
 
 #define ISERT_MAX_CQ 64
 
-struct isert_cq_desc {
-       struct isert_device     *device;
-       int                     cq_index;
-       struct work_struct      cq_rx_work;
-       struct work_struct      cq_tx_work;
+/**
+ * struct isert_comp - iSER completion context
+ *
+ * @device:     pointer to device handle
+ * @cq:         completion queue
+ * @wcs:        work completion array
+ * @active_qps: Number of active QPs attached
+ *              to completion context
+ * @work:       completion work handle
+ */
+struct isert_comp {
+       struct isert_device     *device;
+       struct ib_cq            *cq;
+       struct ib_wc             wcs[16];
+       int                      active_qps;
+       struct work_struct       work;
 };
 
 struct isert_device {
        int                     use_fastreg;
        bool                    pi_capable;
-       int                     cqs_used;
        int                     refcount;
-       int                     cq_active_qps[ISERT_MAX_CQ];
        struct ib_device        *ib_device;
-       struct ib_cq            *dev_rx_cq[ISERT_MAX_CQ];
-       struct ib_cq            *dev_tx_cq[ISERT_MAX_CQ];
-       struct isert_cq_desc    *cq_desc;
+       struct isert_comp       *comps;
+       int                     comps_used;
        struct list_head        dev_node;
        struct ib_device_attr   dev_attr;
        int                     (*reg_rdma_mem)(struct iscsi_conn *conn,
@@ -182,6 +223,7 @@ struct isert_device {
 };
 
 struct isert_np {
+       struct iscsi_np         *np;
        struct semaphore        np_sem;
        struct rdma_cm_id       *np_cm_id;
        struct mutex            np_accept_mutex;
index db3c8c851af16cd22524f080d2db91581621b020..0747c0595a9d42b2ff8c9cb231b38be939821ddb 100644 (file)
@@ -2740,7 +2740,6 @@ static struct scsi_host_template srp_template = {
        .info                           = srp_target_info,
        .queuecommand                   = srp_queuecommand,
        .change_queue_depth             = srp_change_queue_depth,
-       .change_queue_type              = scsi_change_queue_type,
        .eh_abort_handler               = srp_abort,
        .eh_device_reset_handler        = srp_reset_device,
        .eh_host_reset_handler          = srp_reset_host,
index dc829682701ad1dbae8375eb2ff9a2c97feea48f..eb694ddad79fe069b6d2f796004a5e8acc77e833 100644 (file)
@@ -1708,17 +1708,17 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
 
        switch (srp_cmd->task_attr) {
        case SRP_CMD_SIMPLE_Q:
-               cmd->sam_task_attr = MSG_SIMPLE_TAG;
+               cmd->sam_task_attr = TCM_SIMPLE_TAG;
                break;
        case SRP_CMD_ORDERED_Q:
        default:
-               cmd->sam_task_attr = MSG_ORDERED_TAG;
+               cmd->sam_task_attr = TCM_ORDERED_TAG;
                break;
        case SRP_CMD_HEAD_OF_Q:
-               cmd->sam_task_attr = MSG_HEAD_TAG;
+               cmd->sam_task_attr = TCM_HEAD_TAG;
                break;
        case SRP_CMD_ACA:
-               cmd->sam_task_attr = MSG_ACA_TAG;
+               cmd->sam_task_attr = TCM_ACA_TAG;
                break;
        }
 
@@ -1733,7 +1733,7 @@ static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
                                       sizeof(srp_cmd->lun));
        rc = target_submit_cmd(cmd, ch->sess, srp_cmd->cdb,
                        &send_ioctx->sense_data[0], unpacked_lun, data_len,
-                       MSG_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
+                       TCM_SIMPLE_TAG, dir, TARGET_SCF_ACK_KREF);
        if (rc != 0) {
                ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                goto send_sense;
index b205f76d71296d7370d2e9cf2a72c8941bfa6c57..98024856df07fc89e744cb1d7b2356a72146de51 100644 (file)
@@ -4071,7 +4071,7 @@ static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
        int devid;
        int ret;
 
-       cfg = irq_get_chip_data(irq);
+       cfg = irq_cfg(irq);
        if (!cfg)
                return -EINVAL;
 
@@ -4134,7 +4134,7 @@ static int set_affinity(struct irq_data *data, const struct cpumask *mask,
        if (!config_enabled(CONFIG_SMP))
                return -1;
 
-       cfg       = data->chip_data;
+       cfg       = irqd_cfg(data);
        irq       = data->irq;
        irte_info = &cfg->irq_2_irte;
 
@@ -4172,7 +4172,7 @@ static int free_irq(int irq)
        struct irq_2_irte *irte_info;
        struct irq_cfg *cfg;
 
-       cfg = irq_get_chip_data(irq);
+       cfg = irq_cfg(irq);
        if (!cfg)
                return -EINVAL;
 
@@ -4191,7 +4191,7 @@ static void compose_msi_msg(struct pci_dev *pdev,
        struct irq_cfg *cfg;
        union irte irte;
 
-       cfg = irq_get_chip_data(irq);
+       cfg = irq_cfg(irq);
        if (!cfg)
                return;
 
@@ -4220,7 +4220,7 @@ static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
        if (!pdev)
                return -EINVAL;
 
-       cfg = irq_get_chip_data(irq);
+       cfg = irq_cfg(irq);
        if (!cfg)
                return -EINVAL;
 
@@ -4240,7 +4240,7 @@ static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
        if (!pdev)
                return -EINVAL;
 
-       cfg = irq_get_chip_data(irq);
+       cfg = irq_cfg(irq);
        if (!cfg)
                return -EINVAL;
 
@@ -4263,7 +4263,7 @@ static int alloc_hpet_msi(unsigned int irq, unsigned int id)
        struct irq_cfg *cfg;
        int index, devid;
 
-       cfg = irq_get_chip_data(irq);
+       cfg = irq_cfg(irq);
        if (!cfg)
                return -EINVAL;
 
index 27541d4408491a73a606bcc85fdd8bc785ed9906..a55b207b9425e30bfdce4c3cab4e26d58ce4eaa6 100644 (file)
@@ -54,7 +54,7 @@ static int __init parse_ioapics_under_ir(void);
 
 static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
 {
-       struct irq_cfg *cfg = irq_get_chip_data(irq);
+       struct irq_cfg *cfg = irq_cfg(irq);
        return cfg ? &cfg->irq_2_iommu : NULL;
 }
 
@@ -85,7 +85,7 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 {
        struct ir_table *table = iommu->ir_table;
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-       struct irq_cfg *cfg = irq_get_chip_data(irq);
+       struct irq_cfg *cfg = irq_cfg(irq);
        unsigned int mask = 0;
        unsigned long flags;
        int index;
@@ -153,7 +153,7 @@ static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
 static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
 {
        struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-       struct irq_cfg *cfg = irq_get_chip_data(irq);
+       struct irq_cfg *cfg = irq_cfg(irq);
        unsigned long flags;
 
        if (!irq_iommu)
@@ -1050,7 +1050,7 @@ static int
 intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
                          bool force)
 {
-       struct irq_cfg *cfg = data->chip_data;
+       struct irq_cfg *cfg = irqd_cfg(data);
        unsigned int dest, irq = data->irq;
        struct irte irte;
        int err;
@@ -1105,7 +1105,7 @@ static void intel_compose_msi_msg(struct pci_dev *pdev,
        u16 sub_handle = 0;
        int ir_index;
 
-       cfg = irq_get_chip_data(irq);
+       cfg = irq_cfg(irq);
 
        ir_index = map_irq_to_irte_handle(irq, &sub_handle);
        BUG_ON(ir_index == -1);
index 2c3f5ad010987d77ddda7b271fb847360cd4bc42..89c4846683be521a1b3fd968afcf9a031e69043a 100644 (file)
@@ -298,7 +298,7 @@ static int set_remapped_irq_affinity(struct irq_data *data,
 
 void free_remapped_irq(int irq)
 {
-       struct irq_cfg *cfg = irq_get_chip_data(irq);
+       struct irq_cfg *cfg = irq_cfg(irq);
 
        if (!remap_ops || !remap_ops->free_irq)
                return;
@@ -311,7 +311,7 @@ void compose_remapped_msi_msg(struct pci_dev *pdev,
                              unsigned int irq, unsigned int dest,
                              struct msi_msg *msg, u8 hpet_id)
 {
-       struct irq_cfg *cfg = irq_get_chip_data(irq);
+       struct irq_cfg *cfg = irq_cfg(irq);
 
        if (!irq_remapped(cfg))
                native_compose_msi_msg(pdev, irq, dest, msg, hpet_id);
@@ -364,7 +364,7 @@ static void ir_ack_apic_edge(struct irq_data *data)
 static void ir_ack_apic_level(struct irq_data *data)
 {
        ack_APIC_irq();
-       eoi_ioapic_irq(data->irq, data->chip_data);
+       eoi_ioapic_irq(data->irq, irqd_cfg(data));
 }
 
 static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
index 3067d56b11a6b6af70ac980ec42d1e8846dd87ca..5844b80bd90e71f4f6956b5cfd97f0a8b7867c91 100644 (file)
@@ -204,16 +204,6 @@ config THERM_ADT746X
           iBook G4, and the ATI based aluminium PowerBooks, allowing slightly
          better fan behaviour by default, and some manual control.
 
-config THERM_PM72
-       tristate "Support for thermal management on PowerMac G5 (AGP)"
-       depends on I2C && I2C_POWERMAC && PPC_PMAC64
-       default n
-       help
-         This driver provides thermostat and fan control for the desktop
-         G5 machines.
-
-         This is deprecated, use windfarm instead.
-
 config WINDFARM
        tristate "New PowerMac thermal control infrastructure"
        depends on PPC
index d2f0120bc878379f460fc9a9d0416d2ae2e89bdd..383ba920085b3a4f5c81ab8a01c5fc32a2126313 100644 (file)
@@ -25,7 +25,6 @@ obj-$(CONFIG_ADB_IOP)         += adb-iop.o
 obj-$(CONFIG_ADB_PMU68K)       += via-pmu68k.o
 obj-$(CONFIG_ADB_MACIO)                += macio-adb.o
 
-obj-$(CONFIG_THERM_PM72)       += therm_pm72.o
 obj-$(CONFIG_THERM_WINDTUNNEL) += therm_windtunnel.o
 obj-$(CONFIG_THERM_ADT746X)    += therm_adt746x.o
 obj-$(CONFIG_WINDFARM)         += windfarm_core.o
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
deleted file mode 100644 (file)
index 7ed9258..0000000
+++ /dev/null
@@ -1,2278 +0,0 @@
-/*
- * Device driver for the thermostats & fan controller of  the
- * Apple G5 "PowerMac7,2" desktop machines.
- *
- * (c) Copyright IBM Corp. 2003-2004
- *
- * Maintained by: Benjamin Herrenschmidt
- *                <benh@kernel.crashing.org>
- * 
- *
- * The algorithm used is the PID control algorithm, used the same
- * way the published Darwin code does, using the same values that
- * are present in the Darwin 7.0 snapshot property lists.
- *
- * As far as the CPUs control loops are concerned, I use the
- * calibration & PID constants provided by the EEPROM,
- * I do _not_ embed any value from the property lists, as the ones
- * provided by Darwin 7.0 seem to always have an older version that
- * what I've seen on the actual computers.
- * It would be interesting to verify that though. Darwin has a
- * version code of 1.0.0d11 for all control loops it seems, while
- * so far, the machines EEPROMs contain a dataset versioned 1.0.0f
- *
- * Darwin doesn't provide source to all parts, some missing
- * bits like the AppleFCU driver or the actual scale of some
- * of the values returned by sensors had to be "guessed" some
- * way... or based on what Open Firmware does.
- *
- * I didn't yet figure out how to get the slots power consumption
- * out of the FCU, so that part has not been implemented yet and
- * the slots fan is set to a fixed 50% PWM, hoping this value is
- * safe enough ...
- *
- * Note: I have observed strange oscillations of the CPU control
- * loop on a dual G5 here. When idle, the CPU exhaust fan tend to
- * oscillates slowly (over several minutes) between the minimum
- * of 300RPMs and approx. 1000 RPMs. I don't know what is causing
- * this, it could be some incorrect constant or an error in the
- * way I ported the algorithm, or it could be just normal. I
- * don't have full understanding on the way Apple tweaked the PID
- * algorithm for the CPU control, it is definitely not a standard
- * implementation...
- *
- * TODO:  - Check MPU structure version/signature
- *        - Add things like /sbin/overtemp for non-critical
- *          overtemp conditions so userland can take some policy
- *          decisions, like slowing down CPUs
- *       - Deal with fan and i2c failures in a better way
- *       - Maybe do a generic PID based on params used for
- *         U3 and Drives ? Definitely need to factor code a bit
- *          better... also make sensor detection more robust using
- *          the device-tree to probe for them
- *        - Figure out how to get the slots consumption and set the
- *          slots fan accordingly
- *
- * History:
- *
- *  Nov. 13, 2003 : 0.5
- *     - First release
- *
- *  Nov. 14, 2003 : 0.6
- *     - Read fan speed from FCU, low level fan routines now deal
- *       with errors & check fan status, though higher level don't
- *       do much.
- *     - Move a bunch of definitions to .h file
- *
- *  Nov. 18, 2003 : 0.7
- *     - Fix build on ppc64 kernel
- *     - Move back statics definitions to .c file
- *     - Avoid calling schedule_timeout with a negative number
- *
- *  Dec. 18, 2003 : 0.8
- *     - Fix typo when reading back fan speed on 2 CPU machines
- *
- *  Mar. 11, 2004 : 0.9
- *     - Rework code accessing the ADC chips, make it more robust and
- *       closer to the chip spec. Also make sure it is configured properly,
- *        I've seen yet unexplained cases where on startup, I would have stale
- *        values in the configuration register
- *     - Switch back to use of target fan speed for PID, thus lowering
- *        pressure on i2c
- *
- *  Oct. 20, 2004 : 1.1
- *     - Add device-tree lookup for fan IDs, should detect liquid cooling
- *        pumps when present
- *     - Enable driver for PowerMac7,3 machines
- *     - Split the U3/Backside cooling on U3 & U3H versions as Darwin does
- *     - Add new CPU cooling algorithm for machines with liquid cooling
- *     - Workaround for some PowerMac7,3 with empty "fan" node in the devtree
- *     - Fix a signed/unsigned compare issue in some PID loops
- *
- *  Mar. 10, 2005 : 1.2
- *     - Add basic support for Xserve G5
- *     - Retrieve pumps min/max from EEPROM image in device-tree (broken)
- *     - Use min/max macros here or there
- *     - Latest darwin updated U3H min fan speed to 20% PWM
- *
- *  July. 06, 2006 : 1.3
- *     - Fix setting of RPM fans on Xserve G5 (they were going too fast)
- *      - Add missing slots fan control loop for Xserve G5
- *     - Lower fixed slots fan speed from 50% to 40% on desktop G5s. We
- *        still can't properly implement the control loop for these, so let's
- *        reduce the noise a little bit, it appears that 40% still gives us
- *        a pretty good air flow
- *     - Add code to "tickle" the FCU regulary so it doesn't think that
- *        we are gone while in fact, the machine just didn't need any fan
- *        speed change lately
- *
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/errno.h>
-#include <linux/kernel.h>
-#include <linux/delay.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/spinlock.h>
-#include <linux/wait.h>
-#include <linux/reboot.h>
-#include <linux/kmod.h>
-#include <linux/i2c.h>
-#include <linux/kthread.h>
-#include <linux/mutex.h>
-#include <linux/of_device.h>
-#include <linux/of_platform.h>
-#include <asm/prom.h>
-#include <asm/machdep.h>
-#include <asm/io.h>
-#include <asm/sections.h>
-#include <asm/macio.h>
-
-#include "therm_pm72.h"
-
-#define VERSION "1.3"
-
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(args...)   printk(args)
-#else
-#define DBG(args...)   do { } while(0)
-#endif
-
-
-/*
- * Driver statics
- */
-
-static struct platform_device *                of_dev;
-static struct i2c_adapter *            u3_0;
-static struct i2c_adapter *            u3_1;
-static struct i2c_adapter *            k2;
-static struct i2c_client *             fcu;
-static struct cpu_pid_state            processor_state[2];
-static struct basckside_pid_params     backside_params;
-static struct backside_pid_state       backside_state;
-static struct drives_pid_state         drives_state;
-static struct dimm_pid_state           dimms_state;
-static struct slots_pid_state          slots_state;
-static int                             state;
-static int                             cpu_count;
-static int                             cpu_pid_type;
-static struct task_struct              *ctrl_task;
-static struct completion               ctrl_complete;
-static int                             critical_state;
-static int                             rackmac;
-static s32                             dimm_output_clamp;
-static int                             fcu_rpm_shift;
-static int                             fcu_tickle_ticks;
-static DEFINE_MUTEX(driver_lock);
-
-/*
- * We have 3 types of CPU PID control. One is "split" old style control
- * for intake & exhaust fans, the other is "combined" control for both
- * CPUs that also deals with the pumps when present. To be "compatible"
- * with OS X at this point, we only use "COMBINED" on the machines that
- * are identified as having the pumps (though that identification is at
- * least dodgy). Ultimately, we could probably switch completely to this
- * algorithm provided we hack it to deal with the UP case
- */
-#define CPU_PID_TYPE_SPLIT     0
-#define CPU_PID_TYPE_COMBINED  1
-#define CPU_PID_TYPE_RACKMAC   2
-
-/*
- * This table describes all fans in the FCU. The "id" and "type" values
- * are defaults valid for all earlier machines. Newer machines will
- * eventually override the table content based on the device-tree
- */
-struct fcu_fan_table
-{
-       char*   loc;    /* location code */
-       int     type;   /* 0 = rpm, 1 = pwm, 2 = pump */
-       int     id;     /* id or -1 */
-};
-
-#define FCU_FAN_RPM            0
-#define FCU_FAN_PWM            1
-
-#define FCU_FAN_ABSENT_ID      -1
-
-#define FCU_FAN_COUNT          ARRAY_SIZE(fcu_fans)
-
-struct fcu_fan_table   fcu_fans[] = {
-       [BACKSIDE_FAN_PWM_INDEX] = {
-               .loc    = "BACKSIDE,SYS CTRLR FAN",
-               .type   = FCU_FAN_PWM,
-               .id     = BACKSIDE_FAN_PWM_DEFAULT_ID,
-       },
-       [DRIVES_FAN_RPM_INDEX] = {
-               .loc    = "DRIVE BAY",
-               .type   = FCU_FAN_RPM,
-               .id     = DRIVES_FAN_RPM_DEFAULT_ID,
-       },
-       [SLOTS_FAN_PWM_INDEX] = {
-               .loc    = "SLOT,PCI FAN",
-               .type   = FCU_FAN_PWM,
-               .id     = SLOTS_FAN_PWM_DEFAULT_ID,
-       },
-       [CPUA_INTAKE_FAN_RPM_INDEX] = {
-               .loc    = "CPU A INTAKE",
-               .type   = FCU_FAN_RPM,
-               .id     = CPUA_INTAKE_FAN_RPM_DEFAULT_ID,
-       },
-       [CPUA_EXHAUST_FAN_RPM_INDEX] = {
-               .loc    = "CPU A EXHAUST",
-               .type   = FCU_FAN_RPM,
-               .id     = CPUA_EXHAUST_FAN_RPM_DEFAULT_ID,
-       },
-       [CPUB_INTAKE_FAN_RPM_INDEX] = {
-               .loc    = "CPU B INTAKE",
-               .type   = FCU_FAN_RPM,
-               .id     = CPUB_INTAKE_FAN_RPM_DEFAULT_ID,
-       },
-       [CPUB_EXHAUST_FAN_RPM_INDEX] = {
-               .loc    = "CPU B EXHAUST",
-               .type   = FCU_FAN_RPM,
-               .id     = CPUB_EXHAUST_FAN_RPM_DEFAULT_ID,
-       },
-       /* pumps aren't present by default, have to be looked up in the
-        * device-tree
-        */
-       [CPUA_PUMP_RPM_INDEX] = {
-               .loc    = "CPU A PUMP",
-               .type   = FCU_FAN_RPM,          
-               .id     = FCU_FAN_ABSENT_ID,
-       },
-       [CPUB_PUMP_RPM_INDEX] = {
-               .loc    = "CPU B PUMP",
-               .type   = FCU_FAN_RPM,
-               .id     = FCU_FAN_ABSENT_ID,
-       },
-       /* Xserve fans */
-       [CPU_A1_FAN_RPM_INDEX] = {
-               .loc    = "CPU A 1",
-               .type   = FCU_FAN_RPM,
-               .id     = FCU_FAN_ABSENT_ID,
-       },
-       [CPU_A2_FAN_RPM_INDEX] = {
-               .loc    = "CPU A 2",
-               .type   = FCU_FAN_RPM,
-               .id     = FCU_FAN_ABSENT_ID,
-       },
-       [CPU_A3_FAN_RPM_INDEX] = {
-               .loc    = "CPU A 3",
-               .type   = FCU_FAN_RPM,
-               .id     = FCU_FAN_ABSENT_ID,
-       },
-       [CPU_B1_FAN_RPM_INDEX] = {
-               .loc    = "CPU B 1",
-               .type   = FCU_FAN_RPM,
-               .id     = FCU_FAN_ABSENT_ID,
-       },
-       [CPU_B2_FAN_RPM_INDEX] = {
-               .loc    = "CPU B 2",
-               .type   = FCU_FAN_RPM,
-               .id     = FCU_FAN_ABSENT_ID,
-       },
-       [CPU_B3_FAN_RPM_INDEX] = {
-               .loc    = "CPU B 3",
-               .type   = FCU_FAN_RPM,
-               .id     = FCU_FAN_ABSENT_ID,
-       },
-};
-
-static struct i2c_driver therm_pm72_driver;
-
-/*
- * Utility function to create an i2c_client structure and
- * attach it to one of u3 adapters
- */
-static struct i2c_client *attach_i2c_chip(int id, const char *name)
-{
-       struct i2c_client *clt;
-       struct i2c_adapter *adap;
-       struct i2c_board_info info;
-
-       if (id & 0x200)
-               adap = k2;
-       else if (id & 0x100)
-               adap = u3_1;
-       else
-               adap = u3_0;
-       if (adap == NULL)
-               return NULL;
-
-       memset(&info, 0, sizeof(struct i2c_board_info));
-       info.addr = (id >> 1) & 0x7f;
-       strlcpy(info.type, "therm_pm72", I2C_NAME_SIZE);
-       clt = i2c_new_device(adap, &info);
-       if (!clt) {
-               printk(KERN_ERR "therm_pm72: Failed to attach to i2c ID 0x%x\n", id);
-               return NULL;
-       }
-
-       /*
-        * Let i2c-core delete that device on driver removal.
-        * This is safe because i2c-core holds the core_lock mutex for us.
-        */
-       list_add_tail(&clt->detected, &therm_pm72_driver.clients);
-       return clt;
-}
-
-/*
- * Here are the i2c chip access wrappers
- */
-
-static void initialize_adc(struct cpu_pid_state *state)
-{
-       int rc;
-       u8 buf[2];
-
-       /* Read ADC the configuration register and cache it. We
-        * also make sure Config2 contains proper values, I've seen
-        * cases where we got stale grabage in there, thus preventing
-        * proper reading of conv. values
-        */
-
-       /* Clear Config2 */
-       buf[0] = 5;
-       buf[1] = 0;
-       i2c_master_send(state->monitor, buf, 2);
-
-       /* Read & cache Config1 */
-       buf[0] = 1;
-       rc = i2c_master_send(state->monitor, buf, 1);
-       if (rc > 0) {
-               rc = i2c_master_recv(state->monitor, buf, 1);
-               if (rc > 0) {
-                       state->adc_config = buf[0];
-                       DBG("ADC config reg: %02x\n", state->adc_config);
-                       /* Disable shutdown mode */
-                       state->adc_config &= 0xfe;
-                       buf[0] = 1;
-                       buf[1] = state->adc_config;
-                       rc = i2c_master_send(state->monitor, buf, 2);
-               }
-       }
-       if (rc <= 0)
-               printk(KERN_ERR "therm_pm72: Error reading ADC config"
-                      " register !\n");
-}
-
-static int read_smon_adc(struct cpu_pid_state *state, int chan)
-{
-       int rc, data, tries = 0;
-       u8 buf[2];
-
-       for (;;) {
-               /* Set channel */
-               buf[0] = 1;
-               buf[1] = (state->adc_config & 0x1f) | (chan << 5);
-               rc = i2c_master_send(state->monitor, buf, 2);
-               if (rc <= 0)
-                       goto error;
-               /* Wait for conversion */
-               msleep(1);
-               /* Switch to data register */
-               buf[0] = 4;
-               rc = i2c_master_send(state->monitor, buf, 1);
-               if (rc <= 0)
-                       goto error;
-               /* Read result */
-               rc = i2c_master_recv(state->monitor, buf, 2);
-               if (rc < 0)
-                       goto error;
-               data = ((u16)buf[0]) << 8 | (u16)buf[1];
-               return data >> 6;
-       error:
-               DBG("Error reading ADC, retrying...\n");
-               if (++tries > 10) {
-                       printk(KERN_ERR "therm_pm72: Error reading ADC !\n");
-                       return -1;
-               }
-               msleep(10);
-       }
-}
-
-static int read_lm87_reg(struct i2c_client * chip, int reg)
-{
-       int rc, tries = 0;
-       u8 buf;
-
-       for (;;) {
-               /* Set address */
-               buf = (u8)reg;
-               rc = i2c_master_send(chip, &buf, 1);
-               if (rc <= 0)
-                       goto error;
-               rc = i2c_master_recv(chip, &buf, 1);
-               if (rc <= 0)
-                       goto error;
-               return (int)buf;
-       error:
-               DBG("Error reading LM87, retrying...\n");
-               if (++tries > 10) {
-                       printk(KERN_ERR "therm_pm72: Error reading LM87 !\n");
-                       return -1;
-               }
-               msleep(10);
-       }
-}
-
-static int fan_read_reg(int reg, unsigned char *buf, int nb)
-{
-       int tries, nr, nw;
-
-       buf[0] = reg;
-       tries = 0;
-       for (;;) {
-               nw = i2c_master_send(fcu, buf, 1);
-               if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
-                       break;
-               msleep(10);
-               ++tries;
-       }
-       if (nw <= 0) {
-               printk(KERN_ERR "Failure writing address to FCU: %d", nw);
-               return -EIO;
-       }
-       tries = 0;
-       for (;;) {
-               nr = i2c_master_recv(fcu, buf, nb);
-               if (nr > 0 || (nr < 0 && nr != -ENODEV) || tries >= 100)
-                       break;
-               msleep(10);
-               ++tries;
-       }
-       if (nr <= 0)
-               printk(KERN_ERR "Failure reading data from FCU: %d", nw);
-       return nr;
-}
-
-static int fan_write_reg(int reg, const unsigned char *ptr, int nb)
-{
-       int tries, nw;
-       unsigned char buf[16];
-
-       buf[0] = reg;
-       memcpy(buf+1, ptr, nb);
-       ++nb;
-       tries = 0;
-       for (;;) {
-               nw = i2c_master_send(fcu, buf, nb);
-               if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
-                       break;
-               msleep(10);
-               ++tries;
-       }
-       if (nw < 0)
-               printk(KERN_ERR "Failure writing to FCU: %d", nw);
-       return nw;
-}
-
-static int start_fcu(void)
-{
-       unsigned char buf = 0xff;
-       int rc;
-
-       rc = fan_write_reg(0xe, &buf, 1);
-       if (rc < 0)
-               return -EIO;
-       rc = fan_write_reg(0x2e, &buf, 1);
-       if (rc < 0)
-               return -EIO;
-       rc = fan_read_reg(0, &buf, 1);
-       if (rc < 0)
-               return -EIO;
-       fcu_rpm_shift = (buf == 1) ? 2 : 3;
-       printk(KERN_DEBUG "FCU Initialized, RPM fan shift is %d\n",
-              fcu_rpm_shift);
-
-       return 0;
-}
-
-static int set_rpm_fan(int fan_index, int rpm)
-{
-       unsigned char buf[2];
-       int rc, id, min, max;
-
-       if (fcu_fans[fan_index].type != FCU_FAN_RPM)
-               return -EINVAL;
-       id = fcu_fans[fan_index].id; 
-       if (id == FCU_FAN_ABSENT_ID)
-               return -EINVAL;
-
-       min = 2400 >> fcu_rpm_shift;
-       max = 56000 >> fcu_rpm_shift;
-
-       if (rpm < min)
-               rpm = min;
-       else if (rpm > max)
-               rpm = max;
-       buf[0] = rpm >> (8 - fcu_rpm_shift);
-       buf[1] = rpm << fcu_rpm_shift;
-       rc = fan_write_reg(0x10 + (id * 2), buf, 2);
-       if (rc < 0)
-               return -EIO;
-       return 0;
-}
-
-static int get_rpm_fan(int fan_index, int programmed)
-{
-       unsigned char failure;
-       unsigned char active;
-       unsigned char buf[2];
-       int rc, id, reg_base;
-
-       if (fcu_fans[fan_index].type != FCU_FAN_RPM)
-               return -EINVAL;
-       id = fcu_fans[fan_index].id; 
-       if (id == FCU_FAN_ABSENT_ID)
-               return -EINVAL;
-
-       rc = fan_read_reg(0xb, &failure, 1);
-       if (rc != 1)
-               return -EIO;
-       if ((failure & (1 << id)) != 0)
-               return -EFAULT;
-       rc = fan_read_reg(0xd, &active, 1);
-       if (rc != 1)
-               return -EIO;
-       if ((active & (1 << id)) == 0)
-               return -ENXIO;
-
-       /* Programmed value or real current speed */
-       reg_base = programmed ? 0x10 : 0x11;
-       rc = fan_read_reg(reg_base + (id * 2), buf, 2);
-       if (rc != 2)
-               return -EIO;
-
-       return (buf[0] << (8 - fcu_rpm_shift)) | buf[1] >> fcu_rpm_shift;
-}
-
-static int set_pwm_fan(int fan_index, int pwm)
-{
-       unsigned char buf[2];
-       int rc, id;
-
-       if (fcu_fans[fan_index].type != FCU_FAN_PWM)
-               return -EINVAL;
-       id = fcu_fans[fan_index].id; 
-       if (id == FCU_FAN_ABSENT_ID)
-               return -EINVAL;
-
-       if (pwm < 10)
-               pwm = 10;
-       else if (pwm > 100)
-               pwm = 100;
-       pwm = (pwm * 2559) / 1000;
-       buf[0] = pwm;
-       rc = fan_write_reg(0x30 + (id * 2), buf, 1);
-       if (rc < 0)
-               return rc;
-       return 0;
-}
-
-static int get_pwm_fan(int fan_index)
-{
-       unsigned char failure;
-       unsigned char active;
-       unsigned char buf[2];
-       int rc, id;
-
-       if (fcu_fans[fan_index].type != FCU_FAN_PWM)
-               return -EINVAL;
-       id = fcu_fans[fan_index].id; 
-       if (id == FCU_FAN_ABSENT_ID)
-               return -EINVAL;
-
-       rc = fan_read_reg(0x2b, &failure, 1);
-       if (rc != 1)
-               return -EIO;
-       if ((failure & (1 << id)) != 0)
-               return -EFAULT;
-       rc = fan_read_reg(0x2d, &active, 1);
-       if (rc != 1)
-               return -EIO;
-       if ((active & (1 << id)) == 0)
-               return -ENXIO;
-
-       /* Programmed value or real current speed */
-       rc = fan_read_reg(0x30 + (id * 2), buf, 1);
-       if (rc != 1)
-               return -EIO;
-
-       return (buf[0] * 1000) / 2559;
-}
-
-static void tickle_fcu(void)
-{
-       int pwm;
-
-       pwm = get_pwm_fan(SLOTS_FAN_PWM_INDEX);
-
-       DBG("FCU Tickle, slots fan is: %d\n", pwm);
-       if (pwm < 0)
-               pwm = 100;
-
-       if (!rackmac) {
-               pwm = SLOTS_FAN_DEFAULT_PWM;
-       } else if (pwm < SLOTS_PID_OUTPUT_MIN)
-               pwm = SLOTS_PID_OUTPUT_MIN;
-
-       /* That is hopefully enough to make the FCU happy */
-       set_pwm_fan(SLOTS_FAN_PWM_INDEX, pwm);
-}
-
-
-/*
- * Utility routine to read the CPU calibration EEPROM data
- * from the device-tree
- */
-static int read_eeprom(int cpu, struct mpu_data *out)
-{
-       struct device_node *np;
-       char nodename[64];
-       const u8 *data;
-       int len;
-
-       /* prom.c routine for finding a node by path is a bit brain dead
-        * and requires exact @xxx unit numbers. This is a bit ugly but
-        * will work for these machines
-        */
-       sprintf(nodename, "/u3@0,f8000000/i2c@f8001000/cpuid@a%d", cpu ? 2 : 0);
-       np = of_find_node_by_path(nodename);
-       if (np == NULL) {
-               printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid node from device-tree\n");
-               return -ENODEV;
-       }
-       data = of_get_property(np, "cpuid", &len);
-       if (data == NULL) {
-               printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid property from device-tree\n");
-               of_node_put(np);
-               return -ENODEV;
-       }
-       memcpy(out, data, sizeof(struct mpu_data));
-       of_node_put(np);
-       
-       return 0;
-}
-
-static void fetch_cpu_pumps_minmax(void)
-{
-       struct cpu_pid_state *state0 = &processor_state[0];
-       struct cpu_pid_state *state1 = &processor_state[1];
-       u16 pump_min = 0, pump_max = 0xffff;
-       u16 tmp[4];
-
-       /* Try to fetch pumps min/max infos from eeprom */
-
-       memcpy(&tmp, &state0->mpu.processor_part_num, 8);
-       if (tmp[0] != 0xffff && tmp[1] != 0xffff) {
-               pump_min = max(pump_min, tmp[0]);
-               pump_max = min(pump_max, tmp[1]);
-       }
-       if (tmp[2] != 0xffff && tmp[3] != 0xffff) {
-               pump_min = max(pump_min, tmp[2]);
-               pump_max = min(pump_max, tmp[3]);
-       }
-
-       /* Double check the values, this _IS_ needed as the EEPROM on
-        * some dual 2.5Ghz G5s seem, at least, to have both min & max
-        * same to the same value ... (grrrr)
-        */
-       if (pump_min == pump_max || pump_min == 0 || pump_max == 0xffff) {
-               pump_min = CPU_PUMP_OUTPUT_MIN;
-               pump_max = CPU_PUMP_OUTPUT_MAX;
-       }
-
-       state0->pump_min = state1->pump_min = pump_min;
-       state0->pump_max = state1->pump_max = pump_max;
-}
-
-/* 
- * Now, unfortunately, sysfs doesn't give us a nice void * we could
- * pass around to the attribute functions, so we don't really have
- * choice but implement a bunch of them...
- *
- * That sucks a bit, we take the lock because FIX32TOPRINT evaluates
- * the input twice... I accept patches :)
- */
-#define BUILD_SHOW_FUNC_FIX(name, data)                                \
-static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf)       \
-{                                                              \
-       ssize_t r;                                              \
-       mutex_lock(&driver_lock);                                       \
-       r = sprintf(buf, "%d.%03d", FIX32TOPRINT(data));        \
-       mutex_unlock(&driver_lock);                                     \
-       return r;                                               \
-}
-#define BUILD_SHOW_FUNC_INT(name, data)                                \
-static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf)       \
-{                                                              \
-       return sprintf(buf, "%d", data);                        \
-}
-
-BUILD_SHOW_FUNC_FIX(cpu0_temperature, processor_state[0].last_temp)
-BUILD_SHOW_FUNC_FIX(cpu0_voltage, processor_state[0].voltage)
-BUILD_SHOW_FUNC_FIX(cpu0_current, processor_state[0].current_a)
-BUILD_SHOW_FUNC_INT(cpu0_exhaust_fan_rpm, processor_state[0].rpm)
-BUILD_SHOW_FUNC_INT(cpu0_intake_fan_rpm, processor_state[0].intake_rpm)
-
-BUILD_SHOW_FUNC_FIX(cpu1_temperature, processor_state[1].last_temp)
-BUILD_SHOW_FUNC_FIX(cpu1_voltage, processor_state[1].voltage)
-BUILD_SHOW_FUNC_FIX(cpu1_current, processor_state[1].current_a)
-BUILD_SHOW_FUNC_INT(cpu1_exhaust_fan_rpm, processor_state[1].rpm)
-BUILD_SHOW_FUNC_INT(cpu1_intake_fan_rpm, processor_state[1].intake_rpm)
-
-BUILD_SHOW_FUNC_FIX(backside_temperature, backside_state.last_temp)
-BUILD_SHOW_FUNC_INT(backside_fan_pwm, backside_state.pwm)
-
-BUILD_SHOW_FUNC_FIX(drives_temperature, drives_state.last_temp)
-BUILD_SHOW_FUNC_INT(drives_fan_rpm, drives_state.rpm)
-
-BUILD_SHOW_FUNC_FIX(slots_temperature, slots_state.last_temp)
-BUILD_SHOW_FUNC_INT(slots_fan_pwm, slots_state.pwm)
-
-BUILD_SHOW_FUNC_FIX(dimms_temperature, dimms_state.last_temp)
-
-static DEVICE_ATTR(cpu0_temperature,S_IRUGO,show_cpu0_temperature,NULL);
-static DEVICE_ATTR(cpu0_voltage,S_IRUGO,show_cpu0_voltage,NULL);
-static DEVICE_ATTR(cpu0_current,S_IRUGO,show_cpu0_current,NULL);
-static DEVICE_ATTR(cpu0_exhaust_fan_rpm,S_IRUGO,show_cpu0_exhaust_fan_rpm,NULL);
-static DEVICE_ATTR(cpu0_intake_fan_rpm,S_IRUGO,show_cpu0_intake_fan_rpm,NULL);
-
-static DEVICE_ATTR(cpu1_temperature,S_IRUGO,show_cpu1_temperature,NULL);
-static DEVICE_ATTR(cpu1_voltage,S_IRUGO,show_cpu1_voltage,NULL);
-static DEVICE_ATTR(cpu1_current,S_IRUGO,show_cpu1_current,NULL);
-static DEVICE_ATTR(cpu1_exhaust_fan_rpm,S_IRUGO,show_cpu1_exhaust_fan_rpm,NULL);
-static DEVICE_ATTR(cpu1_intake_fan_rpm,S_IRUGO,show_cpu1_intake_fan_rpm,NULL);
-
-static DEVICE_ATTR(backside_temperature,S_IRUGO,show_backside_temperature,NULL);
-static DEVICE_ATTR(backside_fan_pwm,S_IRUGO,show_backside_fan_pwm,NULL);
-
-static DEVICE_ATTR(drives_temperature,S_IRUGO,show_drives_temperature,NULL);
-static DEVICE_ATTR(drives_fan_rpm,S_IRUGO,show_drives_fan_rpm,NULL);
-
-static DEVICE_ATTR(slots_temperature,S_IRUGO,show_slots_temperature,NULL);
-static DEVICE_ATTR(slots_fan_pwm,S_IRUGO,show_slots_fan_pwm,NULL);
-
-static DEVICE_ATTR(dimms_temperature,S_IRUGO,show_dimms_temperature,NULL);
-
-/*
- * CPUs fans control loop
- */
-
-static int do_read_one_cpu_values(struct cpu_pid_state *state, s32 *temp, s32 *power)
-{
-       s32 ltemp, volts, amps;
-       int index, rc = 0;
-
-       /* Default (in case of error) */
-       *temp = state->cur_temp;
-       *power = state->cur_power;
-
-       if (cpu_pid_type == CPU_PID_TYPE_RACKMAC)
-               index = (state->index == 0) ?
-                       CPU_A1_FAN_RPM_INDEX : CPU_B1_FAN_RPM_INDEX;
-       else
-               index = (state->index == 0) ?
-                       CPUA_EXHAUST_FAN_RPM_INDEX : CPUB_EXHAUST_FAN_RPM_INDEX;
-
-       /* Read current fan status */
-       rc = get_rpm_fan(index, !RPM_PID_USE_ACTUAL_SPEED);
-       if (rc < 0) {
-               /* XXX What do we do now ? Nothing for now, keep old value, but
-                * return error upstream
-                */
-               DBG("  cpu %d, fan reading error !\n", state->index);
-       } else {
-               state->rpm = rc;
-               DBG("  cpu %d, exhaust RPM: %d\n", state->index, state->rpm);
-       }
-
-       /* Get some sensor readings and scale it */
-       ltemp = read_smon_adc(state, 1);
-       if (ltemp == -1) {
-               /* XXX What do we do now ? */
-               state->overtemp++;
-               if (rc == 0)
-                       rc = -EIO;
-               DBG("  cpu %d, temp reading error !\n", state->index);
-       } else {
-               /* Fixup temperature according to diode calibration
-                */
-               DBG("  cpu %d, temp raw: %04x, m_diode: %04x, b_diode: %04x\n",
-                   state->index,
-                   ltemp, state->mpu.mdiode, state->mpu.bdiode);
-               *temp = ((s32)ltemp * (s32)state->mpu.mdiode + ((s32)state->mpu.bdiode << 12)) >> 2;
-               state->last_temp = *temp;
-               DBG("  temp: %d.%03d\n", FIX32TOPRINT((*temp)));
-       }
-
-       /*
-        * Read voltage & current and calculate power
-        */
-       volts = read_smon_adc(state, 3);
-       amps = read_smon_adc(state, 4);
-
-       /* Scale voltage and current raw sensor values according to fixed scales
-        * obtained in Darwin and calculate power from I and V
-        */
-       volts *= ADC_CPU_VOLTAGE_SCALE;
-       amps *= ADC_CPU_CURRENT_SCALE;
-       *power = (((u64)volts) * ((u64)amps)) >> 16;
-       state->voltage = volts;
-       state->current_a = amps;
-       state->last_power = *power;
-
-       DBG("  cpu %d, current: %d.%03d, voltage: %d.%03d, power: %d.%03d W\n",
-           state->index, FIX32TOPRINT(state->current_a),
-           FIX32TOPRINT(state->voltage), FIX32TOPRINT(*power));
-
-       return 0;
-}
-
-static void do_cpu_pid(struct cpu_pid_state *state, s32 temp, s32 power)
-{
-       s32 power_target, integral, derivative, proportional, adj_in_target, sval;
-       s64 integ_p, deriv_p, prop_p, sum; 
-       int i;
-
-       /* Calculate power target value (could be done once for all)
-        * and convert to a 16.16 fp number
-        */
-       power_target = ((u32)(state->mpu.pmaxh - state->mpu.padjmax)) << 16;
-       DBG("  power target: %d.%03d, error: %d.%03d\n",
-           FIX32TOPRINT(power_target), FIX32TOPRINT(power_target - power));
-
-       /* Store temperature and power in history array */
-       state->cur_temp = (state->cur_temp + 1) % CPU_TEMP_HISTORY_SIZE;
-       state->temp_history[state->cur_temp] = temp;
-       state->cur_power = (state->cur_power + 1) % state->count_power;
-       state->power_history[state->cur_power] = power;
-       state->error_history[state->cur_power] = power_target - power;
-       
-       /* If first loop, fill the history table */
-       if (state->first) {
-               for (i = 0; i < (state->count_power - 1); i++) {
-                       state->cur_power = (state->cur_power + 1) % state->count_power;
-                       state->power_history[state->cur_power] = power;
-                       state->error_history[state->cur_power] = power_target - power;
-               }
-               for (i = 0; i < (CPU_TEMP_HISTORY_SIZE - 1); i++) {
-                       state->cur_temp = (state->cur_temp + 1) % CPU_TEMP_HISTORY_SIZE;
-                       state->temp_history[state->cur_temp] = temp;                    
-               }
-               state->first = 0;
-       }
-
-       /* Calculate the integral term normally based on the "power" values */
-       sum = 0;
-       integral = 0;
-       for (i = 0; i < state->count_power; i++)
-               integral += state->error_history[i];
-       integral *= CPU_PID_INTERVAL;
-       DBG("  integral: %08x\n", integral);
-
-       /* Calculate the adjusted input (sense value).
-        *   G_r is 12.20
-        *   integ is 16.16
-        *   so the result is 28.36
-        *
-        * input target is mpu.ttarget, input max is mpu.tmax
-        */
-       integ_p = ((s64)state->mpu.pid_gr) * (s64)integral;
-       DBG("   integ_p: %d\n", (int)(integ_p >> 36));
-       sval = (state->mpu.tmax << 16) - ((integ_p >> 20) & 0xffffffff);
-       adj_in_target = (state->mpu.ttarget << 16);
-       if (adj_in_target > sval)
-               adj_in_target = sval;
-       DBG("   adj_in_target: %d.%03d, ttarget: %d\n", FIX32TOPRINT(adj_in_target),
-           state->mpu.ttarget);
-
-       /* Calculate the derivative term */
-       derivative = state->temp_history[state->cur_temp] -
-               state->temp_history[(state->cur_temp + CPU_TEMP_HISTORY_SIZE - 1)
-                                   % CPU_TEMP_HISTORY_SIZE];
-       derivative /= CPU_PID_INTERVAL;
-       deriv_p = ((s64)state->mpu.pid_gd) * (s64)derivative;
-       DBG("   deriv_p: %d\n", (int)(deriv_p >> 36));
-       sum += deriv_p;
-
-       /* Calculate the proportional term */
-       proportional = temp - adj_in_target;
-       prop_p = ((s64)state->mpu.pid_gp) * (s64)proportional;
-       DBG("   prop_p: %d\n", (int)(prop_p >> 36));
-       sum += prop_p;
-
-       /* Scale sum */
-       sum >>= 36;
-
-       DBG("   sum: %d\n", (int)sum);
-       state->rpm += (s32)sum;
-}
-
/*
 * CPU control loop for machines where both CPUs share one set of
 * fans: read both CPUs, run a single PID on the worst-case
 * temperature/power, and apply the result to the intake/exhaust fans
 * (and pumps, when present) of both bays. The shared PID state lives
 * in processor_state[0].
 */
static void do_monitor_cpu_combined(void)
{
	struct cpu_pid_state *state0 = &processor_state[0];
	struct cpu_pid_state *state1 = &processor_state[1];
	s32 temp0, power0, temp1, power1;
	s32 temp_combi, power_combi;
	int rc, intake, pump;

	/* NOTE(review): on a failed read temp0/power0 stay uninitialized
	 * yet are still used below -- presumably reads are assumed never
	 * to fail here; verify.
	 */
	rc = do_read_one_cpu_values(state0, &temp0, &power0);
	if (rc < 0) {
		/* XXX What do we do now ? */
	}
	/* CPU1's own flag is cleared first so that any overtemp it
	 * reports in this pass gets folded into state0, which carries
	 * the combined overtemp count.
	 */
	state1->overtemp = 0;
	rc = do_read_one_cpu_values(state1, &temp1, &power1);
	if (rc < 0) {
		/* XXX What do we do now ? */
	}
	if (state1->overtemp)
		state0->overtemp++;

	/* The PID runs on the worst case of the two CPUs */
	temp_combi = max(temp0, temp1);
	power_combi = max(power0, power1);

	/* Check tmax, increment overtemp if we are there. At tmax+8, we go
	 * full blown immediately and try to trigger a shutdown
	 */
	if (temp_combi >= ((state0->mpu.tmax + 8) << 16)) {
		printk(KERN_WARNING "Warning ! Temperature way above maximum (%d) !\n",
		       temp_combi >> 16);
		state0->overtemp += CPU_MAX_OVERTEMP / 4;
	} else if (temp_combi > (state0->mpu.tmax << 16)) {
		state0->overtemp++;
		printk(KERN_WARNING "Temperature %d above max %d. overtemp %d\n",
		       temp_combi >> 16, state0->mpu.tmax, state0->overtemp);
	} else {
		if (state0->overtemp)
			printk(KERN_WARNING "Temperature back down to %d\n",
			       temp_combi >> 16);
		state0->overtemp = 0;
	}
	if (state0->overtemp >= CPU_MAX_OVERTEMP)
		critical_state = 1;
	/* While over temperature, bypass the PID and run everything at
	 * maximum speed.
	 */
	if (state0->overtemp > 0) {
		state0->rpm = state0->mpu.rmaxn_exhaust_fan;
		state0->intake_rpm = intake = state0->mpu.rmaxn_intake_fan;
		pump = state0->pump_max;
		goto do_set_fans;
	}

	/* Do the PID */
	do_cpu_pid(state0, temp_combi, power_combi);

	/* Range check */
	state0->rpm = max(state0->rpm, (int)state0->mpu.rminn_exhaust_fan);
	state0->rpm = min(state0->rpm, (int)state0->mpu.rmaxn_exhaust_fan);

	/* Calculate intake fan speed (scaled from exhaust, then clamped) */
	intake = (state0->rpm * CPU_INTAKE_SCALE) >> 16;
	intake = max(intake, (int)state0->mpu.rminn_intake_fan);
	intake = min(intake, (int)state0->mpu.rmaxn_intake_fan);
	state0->intake_rpm = intake;

	/* Calculate pump speed proportionally to the exhaust RPM */
	pump = (state0->rpm * state0->pump_max) /
		state0->mpu.rmaxn_exhaust_fan;
	pump = min(pump, state0->pump_max);
	pump = max(pump, state0->pump_min);

 do_set_fans:
	/* We copy values from state 0 to state 1 for /sysfs */
	state1->rpm = state0->rpm;
	state1->intake_rpm = state0->intake_rpm;

	DBG("** CPU %d RPM: %d Ex, %d, Pump: %d, In, overtemp: %d\n",
	    state1->index, (int)state1->rpm, intake, pump, state1->overtemp);

	/* We should check for errors, shouldn't we ? But then, what
	 * do we do once the error occurs ? For FCU notified fan
	 * failures (-EFAULT) we probably want to notify userland
	 * some way...
	 */
	set_rpm_fan(CPUA_INTAKE_FAN_RPM_INDEX, intake);
	set_rpm_fan(CPUA_EXHAUST_FAN_RPM_INDEX, state0->rpm);
	set_rpm_fan(CPUB_INTAKE_FAN_RPM_INDEX, intake);
	set_rpm_fan(CPUB_EXHAUST_FAN_RPM_INDEX, state0->rpm);

	/* Pump entries may be absent (id == FCU_FAN_ABSENT_ID) */
	if (fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID)
		set_rpm_fan(CPUA_PUMP_RPM_INDEX, pump);
	if (fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID)
		set_rpm_fan(CPUB_PUMP_RPM_INDEX, pump);
}
-
/*
 * CPU control loop for machines where each CPU has its own pair of
 * intake/exhaust fans: run the shared per-CPU PID on this CPU's
 * readings and drive only this CPU's fans.
 */
static void do_monitor_cpu_split(struct cpu_pid_state *state)
{
	s32 temp, power;
	int rc, intake;

	/* Read current fan status */
	/* NOTE(review): on failure temp/power stay uninitialized and are
	 * still used below -- same caveat as the combined loop; verify.
	 */
	rc = do_read_one_cpu_values(state, &temp, &power);
	if (rc < 0) {
		/* XXX What do we do now ? */
	}

	/* Check tmax, increment overtemp if we are there. At tmax+8, we go
	 * full blown immediately and try to trigger a shutdown
	 */
	if (temp >= ((state->mpu.tmax + 8) << 16)) {
		printk(KERN_WARNING "Warning ! CPU %d temperature way above maximum"
		       " (%d) !\n",
		       state->index, temp >> 16);
		state->overtemp += CPU_MAX_OVERTEMP / 4;
	} else if (temp > (state->mpu.tmax << 16)) {
		state->overtemp++;
		printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n",
		       state->index, temp >> 16, state->mpu.tmax, state->overtemp);
	} else {
		if (state->overtemp)
			printk(KERN_WARNING "CPU %d temperature back down to %d\n",
			       state->index, temp >> 16);
		state->overtemp = 0;
	}
	if (state->overtemp >= CPU_MAX_OVERTEMP)
		critical_state = 1;
	/* While over temperature, bypass the PID and run fans flat out */
	if (state->overtemp > 0) {
		state->rpm = state->mpu.rmaxn_exhaust_fan;
		state->intake_rpm = intake = state->mpu.rmaxn_intake_fan;
		goto do_set_fans;
	}

	/* Do the PID */
	do_cpu_pid(state, temp, power);

	/* Range check */
	state->rpm = max(state->rpm, (int)state->mpu.rminn_exhaust_fan);
	state->rpm = min(state->rpm, (int)state->mpu.rmaxn_exhaust_fan);

	/* Calculate intake fan (scaled from exhaust RPM, then clamped) */
	intake = (state->rpm * CPU_INTAKE_SCALE) >> 16;
	intake = max(intake, (int)state->mpu.rminn_intake_fan);
	intake = min(intake, (int)state->mpu.rmaxn_intake_fan);
	state->intake_rpm = intake;

 do_set_fans:
	DBG("** CPU %d RPM: %d Ex, %d In, overtemp: %d\n",
	    state->index, (int)state->rpm, intake, state->overtemp);

	/* We should check for errors, shouldn't we ? But then, what
	 * do we do once the error occurs ? For FCU notified fan
	 * failures (-EFAULT) we probably want to notify userland
	 * some way...
	 */
	if (state->index == 0) {
		set_rpm_fan(CPUA_INTAKE_FAN_RPM_INDEX, intake);
		set_rpm_fan(CPUA_EXHAUST_FAN_RPM_INDEX, state->rpm);
	} else {
		set_rpm_fan(CPUB_INTAKE_FAN_RPM_INDEX, intake);
		set_rpm_fan(CPUB_EXHAUST_FAN_RPM_INDEX, state->rpm);
	}
}
-
-static void do_monitor_cpu_rack(struct cpu_pid_state *state)
-{
-       s32 temp, power, fan_min;
-       int rc;
-
-       /* Read current fan status */
-       rc = do_read_one_cpu_values(state, &temp, &power);
-       if (rc < 0) {
-               /* XXX What do we do now ? */
-       }
-
-       /* Check tmax, increment overtemp if we are there. At tmax+8, we go
-        * full blown immediately and try to trigger a shutdown
-        */
-       if (temp >= ((state->mpu.tmax + 8) << 16)) {
-               printk(KERN_WARNING "Warning ! CPU %d temperature way above maximum"
-                      " (%d) !\n",
-                      state->index, temp >> 16);
-               state->overtemp = CPU_MAX_OVERTEMP / 4;
-       } else if (temp > (state->mpu.tmax << 16)) {
-               state->overtemp++;
-               printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n",
-                      state->index, temp >> 16, state->mpu.tmax, state->overtemp);
-       } else {
-               if (state->overtemp)
-                       printk(KERN_WARNING "CPU %d temperature back down to %d\n",
-                              state->index, temp >> 16);
-               state->overtemp = 0;
-       }
-       if (state->overtemp >= CPU_MAX_OVERTEMP)
-               critical_state = 1;
-       if (state->overtemp > 0) {
-               state->rpm = state->intake_rpm = state->mpu.rmaxn_intake_fan;
-               goto do_set_fans;
-       }
-
-       /* Do the PID */
-       do_cpu_pid(state, temp, power);
-
-       /* Check clamp from dimms */
-       fan_min = dimm_output_clamp;
-       fan_min = max(fan_min, (int)state->mpu.rminn_intake_fan);
-
-       DBG(" CPU min mpu = %d, min dimm = %d\n",
-           state->mpu.rminn_intake_fan, dimm_output_clamp);
-
-       state->rpm = max(state->rpm, (int)fan_min);
-       state->rpm = min(state->rpm, (int)state->mpu.rmaxn_intake_fan);
-       state->intake_rpm = state->rpm;
-
- do_set_fans:
-       DBG("** CPU %d RPM: %d overtemp: %d\n",
-           state->index, (int)state->rpm, state->overtemp);
-
-       /* We should check for errors, shouldn't we ? But then, what
-        * do we do once the error occurs ? For FCU notified fan
-        * failures (-EFAULT) we probably want to notify userland
-        * some way...
-        */
-       if (state->index == 0) {
-               set_rpm_fan(CPU_A1_FAN_RPM_INDEX, state->rpm);
-               set_rpm_fan(CPU_A2_FAN_RPM_INDEX, state->rpm);
-               set_rpm_fan(CPU_A3_FAN_RPM_INDEX, state->rpm);
-       } else {
-               set_rpm_fan(CPU_B1_FAN_RPM_INDEX, state->rpm);
-               set_rpm_fan(CPU_B2_FAN_RPM_INDEX, state->rpm);
-               set_rpm_fan(CPU_B3_FAN_RPM_INDEX, state->rpm);
-       }
-}
-
-/*
- * Initialize the state structure for one CPU control loop
- */
-static int init_processor_state(struct cpu_pid_state *state, int index)
-{
-       int err;
-
-       state->index = index;
-       state->first = 1;
-       state->rpm = (cpu_pid_type == CPU_PID_TYPE_RACKMAC) ? 4000 : 1000;
-       state->overtemp = 0;
-       state->adc_config = 0x00;
-
-
-       if (index == 0)
-               state->monitor = attach_i2c_chip(SUPPLY_MONITOR_ID, "CPU0_monitor");
-       else if (index == 1)
-               state->monitor = attach_i2c_chip(SUPPLY_MONITORB_ID, "CPU1_monitor");
-       if (state->monitor == NULL)
-               goto fail;
-
-       if (read_eeprom(index, &state->mpu))
-               goto fail;
-
-       state->count_power = state->mpu.tguardband;
-       if (state->count_power > CPU_POWER_HISTORY_SIZE) {
-               printk(KERN_WARNING "Warning ! too many power history slots\n");
-               state->count_power = CPU_POWER_HISTORY_SIZE;
-       }
-       DBG("CPU %d Using %d power history entries\n", index, state->count_power);
-
-       if (index == 0) {
-               err = device_create_file(&of_dev->dev, &dev_attr_cpu0_temperature);
-               err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_voltage);
-               err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_current);
-               err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_exhaust_fan_rpm);
-               err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_intake_fan_rpm);
-       } else {
-               err = device_create_file(&of_dev->dev, &dev_attr_cpu1_temperature);
-               err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_voltage);
-               err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_current);
-               err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_exhaust_fan_rpm);
-               err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm);
-       }
-       if (err)
-               printk(KERN_WARNING "Failed to create some of the attribute"
-                       "files for CPU %d\n", index);
-
-       return 0;
- fail:
-       state->monitor = NULL;
-       
-       return -ENODEV;
-}
-
-/*
- * Dispose of the state data for one CPU control loop
- */
-static void dispose_processor_state(struct cpu_pid_state *state)
-{
-       if (state->monitor == NULL)
-               return;
-
-       if (state->index == 0) {
-               device_remove_file(&of_dev->dev, &dev_attr_cpu0_temperature);
-               device_remove_file(&of_dev->dev, &dev_attr_cpu0_voltage);
-               device_remove_file(&of_dev->dev, &dev_attr_cpu0_current);
-               device_remove_file(&of_dev->dev, &dev_attr_cpu0_exhaust_fan_rpm);
-               device_remove_file(&of_dev->dev, &dev_attr_cpu0_intake_fan_rpm);
-       } else {
-               device_remove_file(&of_dev->dev, &dev_attr_cpu1_temperature);
-               device_remove_file(&of_dev->dev, &dev_attr_cpu1_voltage);
-               device_remove_file(&of_dev->dev, &dev_attr_cpu1_current);
-               device_remove_file(&of_dev->dev, &dev_attr_cpu1_exhaust_fan_rpm);
-               device_remove_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm);
-       }
-
-       state->monitor = NULL;
-}
-
-/*
- * Motherboard backside & U3 heatsink fan control loop
- */
/*
 * Run one pass of the backside/U3-heatsink fan PID: sample the
 * MAX6690 sensor, update the history, compute P+I+D and set the
 * backside fan PWM, honouring the clamp imposed by the DIMM loop.
 */
static void do_monitor_backside(struct backside_pid_state *state)
{
	s32 temp, integral, derivative, fan_min;
	s64 integ_p, deriv_p, prop_p, sum;
	int i, rc;

	/* Only act every backside_params.interval ticks of the main loop */
	if (--state->ticks != 0)
		return;
	state->ticks = backside_params.interval;

	DBG("backside:\n");

	/* Check fan status */
	rc = get_pwm_fan(BACKSIDE_FAN_PWM_INDEX);
	if (rc < 0) {
		printk(KERN_WARNING "Error %d reading backside fan !\n", rc);
		/* XXX What do we do now ? */
	} else
		state->pwm = rc;
	DBG("  current pwm: %d\n", state->pwm);

	/* Get some sensor readings; temperatures are handled as 16.16
	 * fixed point throughout (hence the << 16).
	 */
	temp = i2c_smbus_read_byte_data(state->monitor, MAX6690_EXT_TEMP) << 16;
	state->last_temp = temp;
	DBG("  temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
	    FIX32TOPRINT(backside_params.input_target));

	/* Store temperature and error in history array */
	state->cur_sample = (state->cur_sample + 1) % BACKSIDE_PID_HISTORY_SIZE;
	state->sample_history[state->cur_sample] = temp;
	state->error_history[state->cur_sample] = temp - backside_params.input_target;

	/* If first loop, fill the history table */
	if (state->first) {
		for (i = 0; i < (BACKSIDE_PID_HISTORY_SIZE - 1); i++) {
			state->cur_sample = (state->cur_sample + 1) %
				BACKSIDE_PID_HISTORY_SIZE;
			state->sample_history[state->cur_sample] = temp;
			state->error_history[state->cur_sample] =
				temp - backside_params.input_target;
		}
		state->first = 0;
	}

	/* Calculate the integral term */
	sum = 0;
	integral = 0;
	for (i = 0; i < BACKSIDE_PID_HISTORY_SIZE; i++)
		integral += state->error_history[i];
	integral *= backside_params.interval;
	DBG("  integral: %08x\n", integral);
	integ_p = ((s64)backside_params.G_r) * (s64)integral;
	DBG("   integ_p: %d\n", (int)(integ_p >> 36));
	sum += integ_p;

	/* Calculate the derivative term (newest minus previous sample) */
	derivative = state->error_history[state->cur_sample] -
		state->error_history[(state->cur_sample + BACKSIDE_PID_HISTORY_SIZE - 1)
				    % BACKSIDE_PID_HISTORY_SIZE];
	derivative /= backside_params.interval;
	deriv_p = ((s64)backside_params.G_d) * (s64)derivative;
	DBG("   deriv_p: %d\n", (int)(deriv_p >> 36));
	sum += deriv_p;

	/* Calculate the proportional term */
	prop_p = ((s64)backside_params.G_p) * (s64)(state->error_history[state->cur_sample]);
	DBG("   prop_p: %d\n", (int)(prop_p >> 36));
	sum += prop_p;

	/* Scale sum back to an integer PWM value (gains and errors are
	 * fixed point, accumulated with 36 fractional bits).
	 */
	sum >>= 36;

	DBG("   sum: %d\n", (int)sum);
	/* U3/U3H params apply the PID output as a delta to the current
	 * PWM; RackMac params use it as an absolute value (see
	 * init_backside_state()).
	 */
	if (backside_params.additive)
		state->pwm += (s32)sum;
	else
		state->pwm = sum;

	/* Check for clamp: never drop below the minimum imposed by the
	 * DIMM loop's clamp value.
	 */
	fan_min = (dimm_output_clamp * 100) / 14000;
	fan_min = max(fan_min, backside_params.output_min);

	state->pwm = max(state->pwm, fan_min);
	state->pwm = min(state->pwm, backside_params.output_max);

	DBG("** BACKSIDE PWM: %d\n", (int)state->pwm);
	set_pwm_fan(BACKSIDE_FAN_PWM_INDEX, state->pwm);
}
-
-/*
- * Initialize the state structure for the backside fan control loop
- */
-static int init_backside_state(struct backside_pid_state *state)
-{
-       struct device_node *u3;
-       int u3h = 1; /* conservative by default */
-       int err;
-
-       /*
-        * There are different PID params for machines with U3 and machines
-        * with U3H, pick the right ones now
-        */
-       u3 = of_find_node_by_path("/u3@0,f8000000");
-       if (u3 != NULL) {
-               const u32 *vers = of_get_property(u3, "device-rev", NULL);
-               if (vers)
-                       if (((*vers) & 0x3f) < 0x34)
-                               u3h = 0;
-               of_node_put(u3);
-       }
-
-       if (rackmac) {
-               backside_params.G_d = BACKSIDE_PID_RACK_G_d;
-               backside_params.input_target = BACKSIDE_PID_RACK_INPUT_TARGET;
-               backside_params.output_min = BACKSIDE_PID_U3H_OUTPUT_MIN;
-               backside_params.interval = BACKSIDE_PID_RACK_INTERVAL;
-               backside_params.G_p = BACKSIDE_PID_RACK_G_p;
-               backside_params.G_r = BACKSIDE_PID_G_r;
-               backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX;
-               backside_params.additive = 0;
-       } else if (u3h) {
-               backside_params.G_d = BACKSIDE_PID_U3H_G_d;
-               backside_params.input_target = BACKSIDE_PID_U3H_INPUT_TARGET;
-               backside_params.output_min = BACKSIDE_PID_U3H_OUTPUT_MIN;
-               backside_params.interval = BACKSIDE_PID_INTERVAL;
-               backside_params.G_p = BACKSIDE_PID_G_p;
-               backside_params.G_r = BACKSIDE_PID_G_r;
-               backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX;
-               backside_params.additive = 1;
-       } else {
-               backside_params.G_d = BACKSIDE_PID_U3_G_d;
-               backside_params.input_target = BACKSIDE_PID_U3_INPUT_TARGET;
-               backside_params.output_min = BACKSIDE_PID_U3_OUTPUT_MIN;
-               backside_params.interval = BACKSIDE_PID_INTERVAL;
-               backside_params.G_p = BACKSIDE_PID_G_p;
-               backside_params.G_r = BACKSIDE_PID_G_r;
-               backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX;
-               backside_params.additive = 1;
-       }
-
-       state->ticks = 1;
-       state->first = 1;
-       state->pwm = 50;
-
-       state->monitor = attach_i2c_chip(BACKSIDE_MAX_ID, "backside_temp");
-       if (state->monitor == NULL)
-               return -ENODEV;
-
-       err = device_create_file(&of_dev->dev, &dev_attr_backside_temperature);
-       err |= device_create_file(&of_dev->dev, &dev_attr_backside_fan_pwm);
-       if (err)
-               printk(KERN_WARNING "Failed to create attribute file(s)"
-                       " for backside fan\n");
-
-       return 0;
-}
-
-/*
- * Dispose of the state data for the backside control loop
- */
-static void dispose_backside_state(struct backside_pid_state *state)
-{
-       if (state->monitor == NULL)
-               return;
-
-       device_remove_file(&of_dev->dev, &dev_attr_backside_temperature);
-       device_remove_file(&of_dev->dev, &dev_attr_backside_fan_pwm);
-
-       state->monitor = NULL;
-}
-/*
- * Drives bay fan control loop
- */
/*
 * Run one pass of the drive-bay fan PID: sample the DS1775 sensor,
 * update the history, compute P+I+D and adjust the drives fan RPM.
 */
static void do_monitor_drives(struct drives_pid_state *state)
{
	s32 temp, integral, derivative;
	s64 integ_p, deriv_p, prop_p, sum;
	int i, rc;

	/* Only act every DRIVES_PID_INTERVAL ticks of the main loop */
	if (--state->ticks != 0)
		return;
	state->ticks = DRIVES_PID_INTERVAL;

	DBG("drives:\n");

	/* Check fan status */
	rc = get_rpm_fan(DRIVES_FAN_RPM_INDEX, !RPM_PID_USE_ACTUAL_SPEED);
	if (rc < 0) {
		printk(KERN_WARNING "Error %d reading drives fan !\n", rc);
		/* XXX What do we do now ? */
	} else
		state->rpm = rc;
	DBG("  current rpm: %d\n", state->rpm);

	/* Get some sensor readings: raw DS1775 word converted to 16.16
	 * fixed point.
	 */
	temp = le16_to_cpu(i2c_smbus_read_word_data(state->monitor,
						    DS1775_TEMP)) << 8;
	state->last_temp = temp;
	DBG("  temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
	    FIX32TOPRINT(DRIVES_PID_INPUT_TARGET));

	/* Store temperature and error in history array */
	state->cur_sample = (state->cur_sample + 1) % DRIVES_PID_HISTORY_SIZE;
	state->sample_history[state->cur_sample] = temp;
	state->error_history[state->cur_sample] = temp - DRIVES_PID_INPUT_TARGET;

	/* If first loop, fill the history table */
	if (state->first) {
		for (i = 0; i < (DRIVES_PID_HISTORY_SIZE - 1); i++) {
			state->cur_sample = (state->cur_sample + 1) %
				DRIVES_PID_HISTORY_SIZE;
			state->sample_history[state->cur_sample] = temp;
			state->error_history[state->cur_sample] =
				temp - DRIVES_PID_INPUT_TARGET;
		}
		state->first = 0;
	}

	/* Calculate the integral term */
	sum = 0;
	integral = 0;
	for (i = 0; i < DRIVES_PID_HISTORY_SIZE; i++)
		integral += state->error_history[i];
	integral *= DRIVES_PID_INTERVAL;
	DBG("  integral: %08x\n", integral);
	integ_p = ((s64)DRIVES_PID_G_r) * (s64)integral;
	DBG("   integ_p: %d\n", (int)(integ_p >> 36));
	sum += integ_p;

	/* Calculate the derivative term (newest minus previous sample) */
	derivative = state->error_history[state->cur_sample] -
		state->error_history[(state->cur_sample + DRIVES_PID_HISTORY_SIZE - 1)
				    % DRIVES_PID_HISTORY_SIZE];
	derivative /= DRIVES_PID_INTERVAL;
	deriv_p = ((s64)DRIVES_PID_G_d) * (s64)derivative;
	DBG("   deriv_p: %d\n", (int)(deriv_p >> 36));
	sum += deriv_p;

	/* Calculate the proportional term */
	prop_p = ((s64)DRIVES_PID_G_p) * (s64)(state->error_history[state->cur_sample]);
	DBG("   prop_p: %d\n", (int)(prop_p >> 36));
	sum += prop_p;

	/* Scale sum back to an integer RPM adjustment */
	sum >>= 36;

	DBG("   sum: %d\n", (int)sum);
	/* Unlike the slots/DIMM loops, this PID output is a delta on the
	 * current RPM rather than an absolute value.
	 */
	state->rpm += (s32)sum;

	state->rpm = max(state->rpm, DRIVES_PID_OUTPUT_MIN);
	state->rpm = min(state->rpm, DRIVES_PID_OUTPUT_MAX);

	DBG("** DRIVES RPM: %d\n", (int)state->rpm);
	set_rpm_fan(DRIVES_FAN_RPM_INDEX, state->rpm);
}
-
-/*
- * Initialize the state structure for the drives bay fan control loop
- */
-static int init_drives_state(struct drives_pid_state *state)
-{
-       int err;
-
-       state->ticks = 1;
-       state->first = 1;
-       state->rpm = 1000;
-
-       state->monitor = attach_i2c_chip(DRIVES_DALLAS_ID, "drives_temp");
-       if (state->monitor == NULL)
-               return -ENODEV;
-
-       err = device_create_file(&of_dev->dev, &dev_attr_drives_temperature);
-       err |= device_create_file(&of_dev->dev, &dev_attr_drives_fan_rpm);
-       if (err)
-               printk(KERN_WARNING "Failed to create attribute file(s)"
-                       " for drives bay fan\n");
-
-       return 0;
-}
-
-/*
- * Dispose of the state data for the drives control loop
- */
-static void dispose_drives_state(struct drives_pid_state *state)
-{
-       if (state->monitor == NULL)
-               return;
-
-       device_remove_file(&of_dev->dev, &dev_attr_drives_temperature);
-       device_remove_file(&of_dev->dev, &dev_attr_drives_fan_rpm);
-
-       state->monitor = NULL;
-}
-
-/*
- * DIMMs temp control loop
- */
/*
 * Run one pass of the DIMM temperature PID off the LM87 internal
 * sensor. The output is not a fan setting but a clamp value
 * (dimm_output_clamp) used as a floor by the backside and rack CPU
 * loops; the backside fan is additionally clamped immediately here.
 */
static void do_monitor_dimms(struct dimm_pid_state *state)
{
	s32 temp, integral, derivative, fan_min;
	s64 integ_p, deriv_p, prop_p, sum;
	int i;

	/* Only act every DIMM_PID_INTERVAL ticks of the main loop */
	if (--state->ticks != 0)
		return;
	state->ticks = DIMM_PID_INTERVAL;

	DBG("DIMM:\n");

	DBG("  current value: %d\n", state->output);

	/* Skip the whole pass if the sensor read fails */
	temp = read_lm87_reg(state->monitor, LM87_INT_TEMP);
	if (temp < 0)
		return;
	temp <<= 16;	/* to 16.16 fixed point */
	state->last_temp = temp;
	DBG("  temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
	    FIX32TOPRINT(DIMM_PID_INPUT_TARGET));

	/* Store temperature and error in history array */
	state->cur_sample = (state->cur_sample + 1) % DIMM_PID_HISTORY_SIZE;
	state->sample_history[state->cur_sample] = temp;
	state->error_history[state->cur_sample] = temp - DIMM_PID_INPUT_TARGET;

	/* If first loop, fill the history table */
	if (state->first) {
		for (i = 0; i < (DIMM_PID_HISTORY_SIZE - 1); i++) {
			state->cur_sample = (state->cur_sample + 1) %
				DIMM_PID_HISTORY_SIZE;
			state->sample_history[state->cur_sample] = temp;
			state->error_history[state->cur_sample] =
				temp - DIMM_PID_INPUT_TARGET;
		}
		state->first = 0;
	}

	/* Calculate the integral term */
	sum = 0;
	integral = 0;
	for (i = 0; i < DIMM_PID_HISTORY_SIZE; i++)
		integral += state->error_history[i];
	integral *= DIMM_PID_INTERVAL;
	DBG("  integral: %08x\n", integral);
	integ_p = ((s64)DIMM_PID_G_r) * (s64)integral;
	DBG("   integ_p: %d\n", (int)(integ_p >> 36));
	sum += integ_p;

	/* Calculate the derivative term (newest minus previous sample) */
	derivative = state->error_history[state->cur_sample] -
		state->error_history[(state->cur_sample + DIMM_PID_HISTORY_SIZE - 1)
				    % DIMM_PID_HISTORY_SIZE];
	derivative /= DIMM_PID_INTERVAL;
	deriv_p = ((s64)DIMM_PID_G_d) * (s64)derivative;
	DBG("   deriv_p: %d\n", (int)(deriv_p >> 36));
	sum += deriv_p;

	/* Calculate the proportional term */
	prop_p = ((s64)DIMM_PID_G_p) * (s64)(state->error_history[state->cur_sample]);
	DBG("   prop_p: %d\n", (int)(prop_p >> 36));
	sum += prop_p;

	/* Scale sum back to an integer clamp value */
	sum >>= 36;

	DBG("   sum: %d\n", (int)sum);
	/* PID output is an absolute value here, bounded then exported
	 * through the dimm_output_clamp global.
	 */
	state->output = (s32)sum;
	state->output = max(state->output, DIMM_PID_OUTPUT_MIN);
	state->output = min(state->output, DIMM_PID_OUTPUT_MAX);
	dimm_output_clamp = state->output;

	DBG("** DIMM clamp value: %d\n", (int)state->output);

	/* Backside PID is only every 5 seconds, force backside fan clamping now */
	fan_min = (dimm_output_clamp * 100) / 14000;
	fan_min = max(fan_min, backside_params.output_min);
	if (backside_state.pwm < fan_min) {
		backside_state.pwm = fan_min;
		DBG(" -> applying clamp to backside fan now: %d  !\n", fan_min);
		set_pwm_fan(BACKSIDE_FAN_PWM_INDEX, fan_min);
	}
}
-
-/*
- * Initialize the state structure for the DIMM temp control loop
- */
-static int init_dimms_state(struct dimm_pid_state *state)
-{
-       state->ticks = 1;
-       state->first = 1;
-       state->output = 4000;
-
-       state->monitor = attach_i2c_chip(XSERVE_DIMMS_LM87, "dimms_temp");
-       if (state->monitor == NULL)
-               return -ENODEV;
-
-       if (device_create_file(&of_dev->dev, &dev_attr_dimms_temperature))
-               printk(KERN_WARNING "Failed to create attribute file"
-                       " for DIMM temperature\n");
-
-       return 0;
-}
-
-/*
- * Dispose of the state data for the DIMM control loop
- */
-static void dispose_dimms_state(struct dimm_pid_state *state)
-{
-       if (state->monitor == NULL)
-               return;
-
-       device_remove_file(&of_dev->dev, &dev_attr_dimms_temperature);
-
-       state->monitor = NULL;
-}
-
-/*
- * Slots fan control loop
- */
-static void do_monitor_slots(struct slots_pid_state *state)
-{
-       s32 temp, integral, derivative;
-       s64 integ_p, deriv_p, prop_p, sum;
-       int i, rc;
-
-       if (--state->ticks != 0)
-               return;
-       state->ticks = SLOTS_PID_INTERVAL;
-
-       DBG("slots:\n");
-
-       /* Check fan status */
-       rc = get_pwm_fan(SLOTS_FAN_PWM_INDEX);
-       if (rc < 0) {
-               printk(KERN_WARNING "Error %d reading slots fan !\n", rc);
-               /* XXX What do we do now ? */
-       } else
-               state->pwm = rc;
-       DBG("  current pwm: %d\n", state->pwm);
-
-       /* Get some sensor readings */
-       temp = le16_to_cpu(i2c_smbus_read_word_data(state->monitor,
-                                                   DS1775_TEMP)) << 8;
-       state->last_temp = temp;
-       DBG("  temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp),
-           FIX32TOPRINT(SLOTS_PID_INPUT_TARGET));
-
-       /* Store temperature and error in history array */
-       state->cur_sample = (state->cur_sample + 1) % SLOTS_PID_HISTORY_SIZE;
-       state->sample_history[state->cur_sample] = temp;
-       state->error_history[state->cur_sample] = temp - SLOTS_PID_INPUT_TARGET;
-
-       /* If first loop, fill the history table */
-       if (state->first) {
-               for (i = 0; i < (SLOTS_PID_HISTORY_SIZE - 1); i++) {
-                       state->cur_sample = (state->cur_sample + 1) %
-                               SLOTS_PID_HISTORY_SIZE;
-                       state->sample_history[state->cur_sample] = temp;
-                       state->error_history[state->cur_sample] =
-                               temp - SLOTS_PID_INPUT_TARGET;
-               }
-               state->first = 0;
-       }
-
-       /* Calculate the integral term */
-       sum = 0;
-       integral = 0;
-       for (i = 0; i < SLOTS_PID_HISTORY_SIZE; i++)
-               integral += state->error_history[i];
-       integral *= SLOTS_PID_INTERVAL;
-       DBG("  integral: %08x\n", integral);
-       integ_p = ((s64)SLOTS_PID_G_r) * (s64)integral;
-       DBG("   integ_p: %d\n", (int)(integ_p >> 36));
-       sum += integ_p;
-
-       /* Calculate the derivative term */
-       derivative = state->error_history[state->cur_sample] -
-               state->error_history[(state->cur_sample + SLOTS_PID_HISTORY_SIZE - 1)
-                                   % SLOTS_PID_HISTORY_SIZE];
-       derivative /= SLOTS_PID_INTERVAL;
-       deriv_p = ((s64)SLOTS_PID_G_d) * (s64)derivative;
-       DBG("   deriv_p: %d\n", (int)(deriv_p >> 36));
-       sum += deriv_p;
-
-       /* Calculate the proportional term */
-       prop_p = ((s64)SLOTS_PID_G_p) * (s64)(state->error_history[state->cur_sample]);
-       DBG("   prop_p: %d\n", (int)(prop_p >> 36));
-       sum += prop_p;
-
-       /* Scale sum */
-       sum >>= 36;
-
-       DBG("   sum: %d\n", (int)sum);
-       state->pwm = (s32)sum;
-
-       state->pwm = max(state->pwm, SLOTS_PID_OUTPUT_MIN);
-       state->pwm = min(state->pwm, SLOTS_PID_OUTPUT_MAX);
-
-       DBG("** DRIVES PWM: %d\n", (int)state->pwm);
-       set_pwm_fan(SLOTS_FAN_PWM_INDEX, state->pwm);
-}
-
-/*
- * Initialize the state structure for the slots bay fan control loop
- */
-static int init_slots_state(struct slots_pid_state *state)
-{
-       int err;
-
-       state->ticks = 1;
-       state->first = 1;
-       state->pwm = 50;
-
-       state->monitor = attach_i2c_chip(XSERVE_SLOTS_LM75, "slots_temp");
-       if (state->monitor == NULL)
-               return -ENODEV;
-
-       err = device_create_file(&of_dev->dev, &dev_attr_slots_temperature);
-       err |= device_create_file(&of_dev->dev, &dev_attr_slots_fan_pwm);
-       if (err)
-               printk(KERN_WARNING "Failed to create attribute file(s)"
-                       " for slots bay fan\n");
-
-       return 0;
-}
-
-/*
- * Dispose of the state data for the slots control loop
- */
-static void dispose_slots_state(struct slots_pid_state *state)
-{
-       if (state->monitor == NULL)
-               return;
-
-       device_remove_file(&of_dev->dev, &dev_attr_slots_temperature);
-       device_remove_file(&of_dev->dev, &dev_attr_slots_fan_pwm);
-
-       state->monitor = NULL;
-}
-
-
-static int call_critical_overtemp(void)
-{
-       char *argv[] = { critical_overtemp_path, NULL };
-       static char *envp[] = { "HOME=/",
-                               "TERM=linux",
-                               "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
-                               NULL };
-
-       return call_usermodehelper(critical_overtemp_path,
-                                  argv, envp, UMH_WAIT_EXEC);
-}
-
-
-/*
- * Here's the kernel thread that calls the various control loops
- */
-static int main_control_loop(void *x)
-{
-       DBG("main_control_loop started\n");
-
-       mutex_lock(&driver_lock);
-
-       if (start_fcu() < 0) {
-               printk(KERN_ERR "kfand: failed to start FCU\n");
-               mutex_unlock(&driver_lock);
-               goto out;
-       }
-
-       /* Set the PCI fan once for now on non-RackMac */
-       if (!rackmac)
-               set_pwm_fan(SLOTS_FAN_PWM_INDEX, SLOTS_FAN_DEFAULT_PWM);
-
-       /* Initialize ADCs */
-       initialize_adc(&processor_state[0]);
-       if (processor_state[1].monitor != NULL)
-               initialize_adc(&processor_state[1]);
-
-       fcu_tickle_ticks = FCU_TICKLE_TICKS;
-
-       mutex_unlock(&driver_lock);
-
-       while (state == state_attached) {
-               unsigned long elapsed, start;
-
-               start = jiffies;
-
-               mutex_lock(&driver_lock);
-
-               /* Tickle the FCU just in case */
-               if (--fcu_tickle_ticks < 0) {
-                       fcu_tickle_ticks = FCU_TICKLE_TICKS;
-                       tickle_fcu();
-               }
-
-               /* First, we always calculate the new DIMMs state on an Xserve */
-               if (rackmac)
-                       do_monitor_dimms(&dimms_state);
-
-               /* Then, the CPUs */
-               if (cpu_pid_type == CPU_PID_TYPE_COMBINED)
-                       do_monitor_cpu_combined();
-               else if (cpu_pid_type == CPU_PID_TYPE_RACKMAC) {
-                       do_monitor_cpu_rack(&processor_state[0]);
-                       if (processor_state[1].monitor != NULL)
-                               do_monitor_cpu_rack(&processor_state[1]);
-                       // better deal with UP
-               } else {
-                       do_monitor_cpu_split(&processor_state[0]);
-                       if (processor_state[1].monitor != NULL)
-                               do_monitor_cpu_split(&processor_state[1]);
-                       // better deal with UP
-               }
-               /* Then, the rest */
-               do_monitor_backside(&backside_state);
-               if (rackmac)
-                       do_monitor_slots(&slots_state);
-               else
-                       do_monitor_drives(&drives_state);
-               mutex_unlock(&driver_lock);
-
-               if (critical_state == 1) {
-                       printk(KERN_WARNING "Temperature control detected a critical condition\n");
-                       printk(KERN_WARNING "Attempting to shut down...\n");
-                       if (call_critical_overtemp()) {
-                               printk(KERN_WARNING "Can't call %s, power off now!\n",
-                                      critical_overtemp_path);
-                               machine_power_off();
-                       }
-               }
-               if (critical_state > 0)
-                       critical_state++;
-               if (critical_state > MAX_CRITICAL_STATE) {
-                       printk(KERN_WARNING "Shutdown timed out, power off now !\n");
-                       machine_power_off();
-               }
-
-               // FIXME: Deal with signals
-               elapsed = jiffies - start;
-               if (elapsed < HZ)
-                       schedule_timeout_interruptible(HZ - elapsed);
-       }
-
- out:
-       DBG("main_control_loop ended\n");
-
-       ctrl_task = 0;
-       complete_and_exit(&ctrl_complete, 0);
-}
-
-/*
- * Dispose the control loops when tearing down
- */
-static void dispose_control_loops(void)
-{
-       dispose_processor_state(&processor_state[0]);
-       dispose_processor_state(&processor_state[1]);
-       dispose_backside_state(&backside_state);
-       dispose_drives_state(&drives_state);
-       dispose_slots_state(&slots_state);
-       dispose_dimms_state(&dimms_state);
-}
-
-/*
- * Create the control loops. U3-0 i2c bus is up, so we can now
- * get to the various sensors
- */
-static int create_control_loops(void)
-{
-       struct device_node *np;
-
-       /* Count CPUs from the device-tree, we don't care how many are
-        * actually used by Linux
-        */
-       cpu_count = 0;
-       for (np = NULL; NULL != (np = of_find_node_by_type(np, "cpu"));)
-               cpu_count++;
-
-       DBG("counted %d CPUs in the device-tree\n", cpu_count);
-
-       /* Decide the type of PID algorithm to use based on the presence of
-        * the pumps, though that may not be the best way, that is good enough
-        * for now
-        */
-       if (rackmac)
-               cpu_pid_type = CPU_PID_TYPE_RACKMAC;
-       else if (of_machine_is_compatible("PowerMac7,3")
-           && (cpu_count > 1)
-           && fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID
-           && fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID) {
-               printk(KERN_INFO "Liquid cooling pumps detected, using new algorithm !\n");
-               cpu_pid_type = CPU_PID_TYPE_COMBINED;
-       } else
-               cpu_pid_type = CPU_PID_TYPE_SPLIT;
-
-       /* Create control loops for everything. If any fail, everything
-        * fails
-        */
-       if (init_processor_state(&processor_state[0], 0))
-               goto fail;
-       if (cpu_pid_type == CPU_PID_TYPE_COMBINED)
-               fetch_cpu_pumps_minmax();
-
-       if (cpu_count > 1 && init_processor_state(&processor_state[1], 1))
-               goto fail;
-       if (init_backside_state(&backside_state))
-               goto fail;
-       if (rackmac && init_dimms_state(&dimms_state))
-               goto fail;
-       if (rackmac && init_slots_state(&slots_state))
-               goto fail;
-       if (!rackmac && init_drives_state(&drives_state))
-               goto fail;
-
-       DBG("all control loops up !\n");
-
-       return 0;
-       
- fail:
-       DBG("failure creating control loops, disposing\n");
-
-       dispose_control_loops();
-
-       return -ENODEV;
-}
-
-/*
- * Start the control loops after everything is up, that is create
- * the thread that will make them run
- */
-static void start_control_loops(void)
-{
-       init_completion(&ctrl_complete);
-
-       ctrl_task = kthread_run(main_control_loop, NULL, "kfand");
-}
-
-/*
- * Stop the control loops when tearing down
- */
-static void stop_control_loops(void)
-{
-       if (ctrl_task)
-               wait_for_completion(&ctrl_complete);
-}
-
-/*
- * Attach to the i2c FCU after detecting U3-1 bus
- */
-static int attach_fcu(void)
-{
-       fcu = attach_i2c_chip(FAN_CTRLER_ID, "fcu");
-       if (fcu == NULL)
-               return -ENODEV;
-
-       DBG("FCU attached\n");
-
-       return 0;
-}
-
-/*
- * Detach from the i2c FCU when tearing down
- */
-static void detach_fcu(void)
-{
-       fcu = NULL;
-}
-
-/*
- * Attach to the i2c controller. We probe the various chips based
- * on the device-tree nodes and build everything for the driver to
- * run, we then kick the driver monitoring thread
- */
-static int therm_pm72_attach(struct i2c_adapter *adapter)
-{
-       mutex_lock(&driver_lock);
-
-       /* Check state */
-       if (state == state_detached)
-               state = state_attaching;
-       if (state != state_attaching) {
-               mutex_unlock(&driver_lock);
-               return 0;
-       }
-
-       /* Check if we are looking for one of these */
-       if (u3_0 == NULL && !strcmp(adapter->name, "u3 0")) {
-               u3_0 = adapter;
-               DBG("found U3-0\n");
-               if (k2 || !rackmac)
-                       if (create_control_loops())
-                               u3_0 = NULL;
-       } else if (u3_1 == NULL && !strcmp(adapter->name, "u3 1")) {
-               u3_1 = adapter;
-               DBG("found U3-1, attaching FCU\n");
-               if (attach_fcu())
-                       u3_1 = NULL;
-       } else if (k2 == NULL && !strcmp(adapter->name, "mac-io 0")) {
-               k2 = adapter;
-               DBG("Found K2\n");
-               if (u3_0 && rackmac)
-                       if (create_control_loops())
-                               k2 = NULL;
-       }
-       /* We got all we need, start control loops */
-       if (u3_0 != NULL && u3_1 != NULL && (k2 || !rackmac)) {
-               DBG("everything up, starting control loops\n");
-               state = state_attached;
-               start_control_loops();
-       }
-       mutex_unlock(&driver_lock);
-
-       return 0;
-}
-
-static int therm_pm72_probe(struct i2c_client *client,
-                           const struct i2c_device_id *id)
-{
-       /* Always succeed, the real work was done in therm_pm72_attach() */
-       return 0;
-}
-
-/*
- * Called when any of the devices which participates into thermal management
- * is going away.
- */
-static int therm_pm72_remove(struct i2c_client *client)
-{
-       struct i2c_adapter *adapter = client->adapter;
-
-       mutex_lock(&driver_lock);
-
-       if (state != state_detached)
-               state = state_detaching;
-
-       /* Stop control loops if any */
-       DBG("stopping control loops\n");
-       mutex_unlock(&driver_lock);
-       stop_control_loops();
-       mutex_lock(&driver_lock);
-
-       if (u3_0 != NULL && !strcmp(adapter->name, "u3 0")) {
-               DBG("lost U3-0, disposing control loops\n");
-               dispose_control_loops();
-               u3_0 = NULL;
-       }
-       
-       if (u3_1 != NULL && !strcmp(adapter->name, "u3 1")) {
-               DBG("lost U3-1, detaching FCU\n");
-               detach_fcu();
-               u3_1 = NULL;
-       }
-       if (u3_0 == NULL && u3_1 == NULL)
-               state = state_detached;
-
-       mutex_unlock(&driver_lock);
-
-       return 0;
-}
-
-/*
- * i2c_driver structure to attach to the host i2c controller
- */
-
-static const struct i2c_device_id therm_pm72_id[] = {
-       /*
-        * Fake device name, thermal management is done by several
-        * chips but we don't need to differentiate between them at
-        * this point.
-        */
-       { "therm_pm72", 0 },
-       { }
-};
-
-static struct i2c_driver therm_pm72_driver = {
-       .driver = {
-               .name   = "therm_pm72",
-       },
-       .attach_adapter = therm_pm72_attach,
-       .probe          = therm_pm72_probe,
-       .remove         = therm_pm72_remove,
-       .id_table       = therm_pm72_id,
-};
-
-static int fan_check_loc_match(const char *loc, int fan)
-{
-       char    tmp[64];
-       char    *c, *e;
-
-       strlcpy(tmp, fcu_fans[fan].loc, 64);
-
-       c = tmp;
-       for (;;) {
-               e = strchr(c, ',');
-               if (e)
-                       *e = 0;
-               if (strcmp(loc, c) == 0)
-                       return 1;
-               if (e == NULL)
-                       break;
-               c = e + 1;
-       }
-       return 0;
-}
-
-static void fcu_lookup_fans(struct device_node *fcu_node)
-{
-       struct device_node *np = NULL;
-       int i;
-
-       /* The table is filled by default with values that are suitable
-        * for the old machines without device-tree informations. We scan
-        * the device-tree and override those values with whatever is
-        * there
-        */
-
-       DBG("Looking up FCU controls in device-tree...\n");
-
-       while ((np = of_get_next_child(fcu_node, np)) != NULL) {
-               int type = -1;
-               const char *loc;
-               const u32 *reg;
-
-               DBG(" control: %s, type: %s\n", np->name, np->type);
-
-               /* Detect control type */
-               if (!strcmp(np->type, "fan-rpm-control") ||
-                   !strcmp(np->type, "fan-rpm"))
-                       type = FCU_FAN_RPM;
-               if (!strcmp(np->type, "fan-pwm-control") ||
-                   !strcmp(np->type, "fan-pwm"))
-                       type = FCU_FAN_PWM;
-               /* Only care about fans for now */
-               if (type == -1)
-                       continue;
-
-               /* Lookup for a matching location */
-               loc = of_get_property(np, "location", NULL);
-               reg = of_get_property(np, "reg", NULL);
-               if (loc == NULL || reg == NULL)
-                       continue;
-               DBG(" matching location: %s, reg: 0x%08x\n", loc, *reg);
-
-               for (i = 0; i < FCU_FAN_COUNT; i++) {
-                       int fan_id;
-
-                       if (!fan_check_loc_match(loc, i))
-                               continue;
-                       DBG(" location match, index: %d\n", i);
-                       fcu_fans[i].id = FCU_FAN_ABSENT_ID;
-                       if (type != fcu_fans[i].type) {
-                               printk(KERN_WARNING "therm_pm72: Fan type mismatch "
-                                      "in device-tree for %s\n", np->full_name);
-                               break;
-                       }
-                       if (type == FCU_FAN_RPM)
-                               fan_id = ((*reg) - 0x10) / 2;
-                       else
-                               fan_id = ((*reg) - 0x30) / 2;
-                       if (fan_id > 7) {
-                               printk(KERN_WARNING "therm_pm72: Can't parse "
-                                      "fan ID in device-tree for %s\n", np->full_name);
-                               break;
-                       }
-                       DBG(" fan id -> %d, type -> %d\n", fan_id, type);
-                       fcu_fans[i].id = fan_id;
-               }
-       }
-
-       /* Now dump the array */
-       printk(KERN_INFO "Detected fan controls:\n");
-       for (i = 0; i < FCU_FAN_COUNT; i++) {
-               if (fcu_fans[i].id == FCU_FAN_ABSENT_ID)
-                       continue;
-               printk(KERN_INFO "  %d: %s fan, id %d, location: %s\n", i,
-                      fcu_fans[i].type == FCU_FAN_RPM ? "RPM" : "PWM",
-                      fcu_fans[i].id, fcu_fans[i].loc);
-       }
-}
-
-static int fcu_of_probe(struct platform_device* dev)
-{
-       state = state_detached;
-       of_dev = dev;
-
-       dev_info(&dev->dev, "PowerMac G5 Thermal control driver %s\n", VERSION);
-
-       /* Lookup the fans in the device tree */
-       fcu_lookup_fans(dev->dev.of_node);
-
-       /* Add the driver */
-       return i2c_add_driver(&therm_pm72_driver);
-}
-
-static int fcu_of_remove(struct platform_device* dev)
-{
-       i2c_del_driver(&therm_pm72_driver);
-
-       return 0;
-}
-
-static const struct of_device_id fcu_match[] = 
-{
-       {
-       .type           = "fcu",
-       },
-       {},
-};
-MODULE_DEVICE_TABLE(of, fcu_match);
-
-static struct platform_driver fcu_of_platform_driver = 
-{
-       .driver = {
-               .name = "temperature",
-               .of_match_table = fcu_match,
-       },
-       .probe          = fcu_of_probe,
-       .remove         = fcu_of_remove
-};
-
-/*
- * Check machine type, attach to i2c controller
- */
-static int __init therm_pm72_init(void)
-{
-       rackmac = of_machine_is_compatible("RackMac3,1");
-
-       if (!of_machine_is_compatible("PowerMac7,2") &&
-           !of_machine_is_compatible("PowerMac7,3") &&
-           !rackmac)
-               return -ENODEV;
-
-       return platform_driver_register(&fcu_of_platform_driver);
-}
-
-static void __exit therm_pm72_exit(void)
-{
-       platform_driver_unregister(&fcu_of_platform_driver);
-}
-
-module_init(therm_pm72_init);
-module_exit(therm_pm72_exit);
-
-MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
-MODULE_DESCRIPTION("Driver for Apple's PowerMac G5 thermal control");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/macintosh/therm_pm72.h b/drivers/macintosh/therm_pm72.h
deleted file mode 100644 (file)
index df3680e..0000000
+++ /dev/null
@@ -1,326 +0,0 @@
-#ifndef __THERM_PMAC_7_2_H__
-#define __THERM_PMAC_7_2_H__
-
-typedef unsigned short fu16;
-typedef int fs32;
-typedef short fs16;
-
-struct mpu_data
-{
-       u8      signature;              /* 0x00 - EEPROM sig. */
-       u8      bytes_used;             /* 0x01 - Bytes used in eeprom (160 ?) */
-       u8      size;                   /* 0x02 - EEPROM size (256 ?) */
-       u8      version;                /* 0x03 - EEPROM version */
-       u32     data_revision;          /* 0x04 - Dataset revision */
-       u8      processor_bin_code[3];  /* 0x08 - Processor BIN code */
-       u8      bin_code_expansion;     /* 0x0b - ??? (padding ?) */
-       u8      processor_num;          /* 0x0c - Number of CPUs on this MPU */
-       u8      input_mul_bus_div;      /* 0x0d - Clock input multiplier/bus divider */
-       u8      reserved1[2];           /* 0x0e - */
-       u32     input_clk_freq_high;    /* 0x10 - Input clock frequency high */
-       u8      cpu_nb_target_cycles;   /* 0x14 - ??? */
-       u8      cpu_statlat;            /* 0x15 - ??? */
-       u8      cpu_snooplat;           /* 0x16 - ??? */
-       u8      cpu_snoopacc;           /* 0x17 - ??? */
-       u8      nb_paamwin;             /* 0x18 - ??? */
-       u8      nb_statlat;             /* 0x19 - ??? */
-       u8      nb_snooplat;            /* 0x1a - ??? */
-       u8      nb_snoopwin;            /* 0x1b - ??? */
-       u8      api_bus_mode;           /* 0x1c - ??? */
-       u8      reserved2[3];           /* 0x1d - */
-       u32     input_clk_freq_low;     /* 0x20 - Input clock frequency low */
-       u8      processor_card_slot;    /* 0x24 - Processor card slot number */
-       u8      reserved3[2];           /* 0x25 - */
-       u8      padjmax;                /* 0x27 - Max power adjustment (Not in OF!) */
-       u8      ttarget;                /* 0x28 - Target temperature */
-       u8      tmax;                   /* 0x29 - Max temperature */
-       u8      pmaxh;                  /* 0x2a - Max power */
-       u8      tguardband;             /* 0x2b - Guardband temp ??? Hist. len in OSX */
-       fs32    pid_gp;                 /* 0x2c - PID proportional gain */
-       fs32    pid_gr;                 /* 0x30 - PID reset gain */
-       fs32    pid_gd;                 /* 0x34 - PID derivative gain */
-       fu16    voph;                   /* 0x38 - Vop High */
-       fu16    vopl;                   /* 0x3a - Vop Low */
-       fs16    nactual_die;            /* 0x3c - nActual Die */
-       fs16    nactual_heatsink;       /* 0x3e - nActual Heatsink */
-       fs16    nactual_system;         /* 0x40 - nActual System */
-       u16     calibration_flags;      /* 0x42 - Calibration flags */
-       fu16    mdiode;                 /* 0x44 - Diode M value (scaling factor) */
-       fs16    bdiode;                 /* 0x46 - Diode B value (offset) */
-       fs32    theta_heat_sink;        /* 0x48 - Theta heat sink */
-       u16     rminn_intake_fan;       /* 0x4c - Intake fan min RPM */
-       u16     rmaxn_intake_fan;       /* 0x4e - Intake fan max RPM */
-       u16     rminn_exhaust_fan;      /* 0x50 - Exhaust fan min RPM */
-       u16     rmaxn_exhaust_fan;      /* 0x52 - Exhaust fan max RPM */
-       u8      processor_part_num[8];  /* 0x54 - Processor part number XX pumps min/max */
-       u32     processor_lot_num;      /* 0x5c - Processor lot number */
-       u8      orig_card_sernum[0x10]; /* 0x60 - Card original serial number */
-       u8      curr_card_sernum[0x10]; /* 0x70 - Card current serial number */
-       u8      mlb_sernum[0x18];       /* 0x80 - MLB serial number */
-       u32     checksum1;              /* 0x98 - */
-       u32     checksum2;              /* 0x9c - */    
-}; /* Total size = 0xa0 */
-
-/* Display a 16.16 fixed point value */
-#define FIX32TOPRINT(f)        ((f) >> 16),((((f) & 0xffff) * 1000) >> 16)
-
-/*
- * Maximum number of seconds to be in critical state (after a
- * normal shutdown attempt). If the machine isn't down after
- * this counter elapses, we force an immediate machine power
- * off.
- */
-#define MAX_CRITICAL_STATE                     30
-static char * critical_overtemp_path = "/sbin/critical_overtemp";
-
-/*
- * This option is "weird" :) Basically, if you define this to 1
- * the control loop for the RPMs fans (not PWMs) will apply the
- * correction factor obtained from the PID to the _actual_ RPM
- * speed read from the FCU.
- * If you define the below constant to 0, then it will be
- * applied to the setpoint RPM speed, that is basically the
- * speed we proviously "asked" for.
- *
- * I'm not sure which of these Apple's algorithm is supposed
- * to use
- */
-#define RPM_PID_USE_ACTUAL_SPEED               0
-
-/*
- * i2c IDs. Currently, we hard code those and assume that
- * the FCU is on U3 bus 1 while all sensors are on U3 bus
- * 0. This appear to be safe enough for this first version
- * of the driver, though I would accept any clean patch
- * doing a better use of the device-tree without turning the
- * while i2c registration mechanism into a racy mess
- *
- * Note: Xserve changed this. We have some bits on the K2 bus,
- * which I arbitrarily set to 0x200. Ultimately, we really want
- * too lookup these in the device-tree though
- */
-#define FAN_CTRLER_ID          0x15e
-#define SUPPLY_MONITOR_ID              0x58
-#define SUPPLY_MONITORB_ID             0x5a
-#define DRIVES_DALLAS_ID       0x94
-#define BACKSIDE_MAX_ID                0x98
-#define XSERVE_DIMMS_LM87      0x25a
-#define XSERVE_SLOTS_LM75      0x290
-
-/*
- * Some MAX6690, DS1775, LM87 register definitions
- */
-#define MAX6690_INT_TEMP       0
-#define MAX6690_EXT_TEMP       1
-#define DS1775_TEMP            0
-#define LM87_INT_TEMP          0x27
-
-/*
- * Scaling factors for the AD7417 ADC converters (except
- * for the CPU diode which is obtained from the EEPROM).
- * Those values are obtained from the property list of
- * the darwin driver
- */
-#define ADC_12V_CURRENT_SCALE  0x0320  /* _AD2 */
-#define ADC_CPU_VOLTAGE_SCALE  0x00a0  /* _AD3 */
-#define ADC_CPU_CURRENT_SCALE  0x1f40  /* _AD4 */
-
-/*
- * PID factors for the U3/Backside fan control loop. We have 2 sets
- * of values here, one set for U3 and one set for U3H
- */
-#define BACKSIDE_FAN_PWM_DEFAULT_ID    1
-#define BACKSIDE_FAN_PWM_INDEX         0
-#define BACKSIDE_PID_U3_G_d            0x02800000
-#define BACKSIDE_PID_U3H_G_d           0x01400000
-#define BACKSIDE_PID_RACK_G_d          0x00500000
-#define BACKSIDE_PID_G_p               0x00500000
-#define BACKSIDE_PID_RACK_G_p          0x0004cccc
-#define BACKSIDE_PID_G_r               0x00000000
-#define BACKSIDE_PID_U3_INPUT_TARGET   0x00410000
-#define BACKSIDE_PID_U3H_INPUT_TARGET  0x004b0000
-#define BACKSIDE_PID_RACK_INPUT_TARGET 0x00460000
-#define BACKSIDE_PID_INTERVAL          5
-#define BACKSIDE_PID_RACK_INTERVAL     1
-#define BACKSIDE_PID_OUTPUT_MAX                100
-#define BACKSIDE_PID_U3_OUTPUT_MIN     20
-#define BACKSIDE_PID_U3H_OUTPUT_MIN    20
-#define BACKSIDE_PID_HISTORY_SIZE      2
-
-struct basckside_pid_params
-{
-       s32                     G_d;
-       s32                     G_p;
-       s32                     G_r;
-       s32                     input_target;
-       s32                     output_min;
-       s32                     output_max;
-       s32                     interval;
-       int                     additive;
-};
-
-struct backside_pid_state
-{
-       int                     ticks;
-       struct i2c_client *     monitor;
-       s32                     sample_history[BACKSIDE_PID_HISTORY_SIZE];
-       s32                     error_history[BACKSIDE_PID_HISTORY_SIZE];
-       int                     cur_sample;
-       s32                     last_temp;
-       int                     pwm;
-       int                     first;
-};
-
-/*
- * PID factors for the Drive Bay fan control loop
- */
-#define DRIVES_FAN_RPM_DEFAULT_ID      2
-#define DRIVES_FAN_RPM_INDEX           1
-#define DRIVES_PID_G_d                 0x01e00000
-#define DRIVES_PID_G_p                 0x00500000
-#define DRIVES_PID_G_r                 0x00000000
-#define DRIVES_PID_INPUT_TARGET                0x00280000
-#define DRIVES_PID_INTERVAL                    5
-#define DRIVES_PID_OUTPUT_MAX          4000
-#define DRIVES_PID_OUTPUT_MIN          300
-#define DRIVES_PID_HISTORY_SIZE                2
-
-struct drives_pid_state
-{
-       int                     ticks;
-       struct i2c_client *     monitor;
-       s32                     sample_history[BACKSIDE_PID_HISTORY_SIZE];
-       s32                     error_history[BACKSIDE_PID_HISTORY_SIZE];
-       int                     cur_sample;
-       s32                     last_temp;
-       int                     rpm;
-       int                     first;
-};
-
-#define SLOTS_FAN_PWM_DEFAULT_ID       2
-#define SLOTS_FAN_PWM_INDEX            2
-#define        SLOTS_FAN_DEFAULT_PWM           40 /* Do better here ! */
-
-
-/*
- * PID factors for the Xserve DIMM control loop
- */
-#define DIMM_PID_G_d                   0
-#define DIMM_PID_G_p                   0
-#define DIMM_PID_G_r                   0x06553600
-#define DIMM_PID_INPUT_TARGET          3276800
-#define DIMM_PID_INTERVAL              1
-#define DIMM_PID_OUTPUT_MAX            14000
-#define DIMM_PID_OUTPUT_MIN            4000
-#define DIMM_PID_HISTORY_SIZE          20
-
-struct dimm_pid_state
-{
-       int                     ticks;
-       struct i2c_client *     monitor;
-       s32                     sample_history[DIMM_PID_HISTORY_SIZE];
-       s32                     error_history[DIMM_PID_HISTORY_SIZE];
-       int                     cur_sample;
-       s32                     last_temp;
-       int                     first;
-       int                     output;
-};
-
-
-/*
- * PID factors for the Xserve Slots control loop
- */
-#define SLOTS_PID_G_d                  0
-#define SLOTS_PID_G_p                  0
-#define SLOTS_PID_G_r                  0x00100000
-#define SLOTS_PID_INPUT_TARGET         3200000
-#define SLOTS_PID_INTERVAL             1
-#define SLOTS_PID_OUTPUT_MAX           100
-#define SLOTS_PID_OUTPUT_MIN           20
-#define SLOTS_PID_HISTORY_SIZE         20
-
-struct slots_pid_state
-{
-       int                     ticks;
-       struct i2c_client *     monitor;
-       s32                     sample_history[SLOTS_PID_HISTORY_SIZE];
-       s32                     error_history[SLOTS_PID_HISTORY_SIZE];
-       int                     cur_sample;
-       s32                     last_temp;
-       int                     first;
-       int                     pwm;
-};
-
-
-
-/* Desktops */
-
-#define CPUA_INTAKE_FAN_RPM_DEFAULT_ID 3
-#define CPUA_EXHAUST_FAN_RPM_DEFAULT_ID        4
-#define CPUB_INTAKE_FAN_RPM_DEFAULT_ID 5
-#define CPUB_EXHAUST_FAN_RPM_DEFAULT_ID        6
-
-#define CPUA_INTAKE_FAN_RPM_INDEX      3
-#define CPUA_EXHAUST_FAN_RPM_INDEX     4
-#define CPUB_INTAKE_FAN_RPM_INDEX      5
-#define CPUB_EXHAUST_FAN_RPM_INDEX     6
-
-#define CPU_INTAKE_SCALE               0x0000f852
-#define CPU_TEMP_HISTORY_SIZE          2
-#define CPU_POWER_HISTORY_SIZE         10
-#define CPU_PID_INTERVAL               1
-#define CPU_MAX_OVERTEMP               90
-
-#define CPUA_PUMP_RPM_INDEX            7
-#define CPUB_PUMP_RPM_INDEX            8
-#define CPU_PUMP_OUTPUT_MAX            3200
-#define CPU_PUMP_OUTPUT_MIN            1250
-
-/* Xserve */
-#define CPU_A1_FAN_RPM_INDEX           9
-#define CPU_A2_FAN_RPM_INDEX           10
-#define CPU_A3_FAN_RPM_INDEX           11
-#define CPU_B1_FAN_RPM_INDEX           12
-#define CPU_B2_FAN_RPM_INDEX           13
-#define CPU_B3_FAN_RPM_INDEX           14
-
-
-struct cpu_pid_state
-{
-       int                     index;
-       struct i2c_client *     monitor;
-       struct mpu_data         mpu;
-       int                     overtemp;
-       s32                     temp_history[CPU_TEMP_HISTORY_SIZE];
-       int                     cur_temp;
-       s32                     power_history[CPU_POWER_HISTORY_SIZE];
-       s32                     error_history[CPU_POWER_HISTORY_SIZE];
-       int                     cur_power;
-       int                     count_power;
-       int                     rpm;
-       int                     intake_rpm;
-       s32                     voltage;
-       s32                     current_a;
-       s32                     last_temp;
-       s32                     last_power;
-       int                     first;
-       u8                      adc_config;
-       s32                     pump_min;
-       s32                     pump_max;
-};
-
-/* Tickle FCU every 10 seconds */
-#define FCU_TICKLE_TICKS       10
-
-/*
- * Driver state
- */
-enum {
-       state_detached,
-       state_attaching,
-       state_attached,
-       state_detaching,
-};
-
-
-#endif /* __THERM_PMAC_7_2_H__ */
index 8735543eacdb9ae0961ed841c3f8e81628100520..493478989dbd4349b23716aa3dbdd92e0d1bc37f 100644 (file)
@@ -1127,6 +1127,24 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
                schedule_zero(tc, virt_block, data_dest, cell, bio);
 }
 
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+
+static void check_for_space(struct pool *pool)
+{
+       int r;
+       dm_block_t nr_free;
+
+       if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
+               return;
+
+       r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
+       if (r)
+               return;
+
+       if (nr_free)
+               set_pool_mode(pool, PM_WRITE);
+}
+
 /*
  * A non-zero return indicates read_only or fail_io mode.
  * Many callers don't care about the return value.
@@ -1141,6 +1159,8 @@ static int commit(struct pool *pool)
        r = dm_pool_commit_metadata(pool->pmd);
        if (r)
                metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
+       else
+               check_for_space(pool);
 
        return r;
 }
@@ -1159,8 +1179,6 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
        }
 }
 
-static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
-
 static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
 {
        int r;
@@ -2155,7 +2173,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
                pool->process_cell = process_cell_read_only;
                pool->process_discard_cell = process_discard_cell;
                pool->process_prepared_mapping = process_prepared_mapping;
-               pool->process_prepared_discard = process_prepared_discard_passdown;
+               pool->process_prepared_discard = process_prepared_discard;
 
                if (!pool->pf.error_if_no_space && no_space_timeout)
                        queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
@@ -3814,6 +3832,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
                r = -EINVAL;
                goto bad;
        }
+       atomic_set(&tc->refcount, 1);
+       init_completion(&tc->can_destroy);
        list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
        spin_unlock_irqrestore(&tc->pool->lock, flags);
        /*
@@ -3826,9 +3846,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
        dm_put(pool_md);
 
-       atomic_set(&tc->refcount, 1);
-       init_completion(&tc->can_destroy);
-
        return 0;
 
 bad:
index 4c06585bf1657b076835c073f973b1dc780968d6..b98cd9d84435fe15ea1cb202850508f83b83204b 100644 (file)
@@ -899,7 +899,7 @@ static void disable_write_same(struct mapped_device *md)
 
 static void clone_endio(struct bio *bio, int error)
 {
-       int r = 0;
+       int r = error;
        struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
        struct dm_io *io = tio->io;
        struct mapped_device *md = tio->io->md;
index cca47210913524d3c57fb118894c97d0c18552f0..51fd6b524371ecd9ae762716e4cf33692c36ce45 100644 (file)
@@ -34,7 +34,8 @@ struct cxl_context *cxl_context_alloc(void)
 /*
  * Initialises a CXL context.
  */
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
+                    struct address_space *mapping)
 {
        int i;
 
@@ -42,6 +43,8 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
        ctx->afu = afu;
        ctx->master = master;
        ctx->pid = NULL; /* Set in start work ioctl */
+       mutex_init(&ctx->mapping_lock);
+       ctx->mapping = mapping;
 
        /*
         * Allocate the segment table before we put it in the IDR so that we
@@ -82,12 +85,12 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master)
         * Allocating IDR! We better make sure everything's setup that
         * dereferences from it.
         */
+       mutex_lock(&afu->contexts_lock);
        idr_preload(GFP_KERNEL);
-       spin_lock(&afu->contexts_lock);
        i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0,
                      ctx->afu->num_procs, GFP_NOWAIT);
-       spin_unlock(&afu->contexts_lock);
        idr_preload_end();
+       mutex_unlock(&afu->contexts_lock);
        if (i < 0)
                return i;
 
@@ -147,6 +150,12 @@ static void __detach_context(struct cxl_context *ctx)
        afu_release_irqs(ctx);
        flush_work(&ctx->fault_work); /* Only needed for dedicated process */
        wake_up_all(&ctx->wq);
+
+       /* Release Problem State Area mapping */
+       mutex_lock(&ctx->mapping_lock);
+       if (ctx->mapping)
+               unmap_mapping_range(ctx->mapping, 0, 0, 1);
+       mutex_unlock(&ctx->mapping_lock);
 }
 
 /*
@@ -168,21 +177,22 @@ void cxl_context_detach_all(struct cxl_afu *afu)
        struct cxl_context *ctx;
        int tmp;
 
-       rcu_read_lock();
-       idr_for_each_entry(&afu->contexts_idr, ctx, tmp)
+       mutex_lock(&afu->contexts_lock);
+       idr_for_each_entry(&afu->contexts_idr, ctx, tmp) {
                /*
                 * Anything done in here needs to be setup before the IDR is
                 * created and torn down after the IDR removed
                 */
                __detach_context(ctx);
-       rcu_read_unlock();
+       }
+       mutex_unlock(&afu->contexts_lock);
 }
 
 void cxl_context_free(struct cxl_context *ctx)
 {
-       spin_lock(&ctx->afu->contexts_lock);
+       mutex_lock(&ctx->afu->contexts_lock);
        idr_remove(&ctx->afu->contexts_idr, ctx->pe);
-       spin_unlock(&ctx->afu->contexts_lock);
+       mutex_unlock(&ctx->afu->contexts_lock);
        synchronize_rcu();
 
        free_page((u64)ctx->sstp);
index b5b6bda44a009c1732ef7832a9884ddc556ac127..28078f8894a5bee9270542d9c7d1c20992ff711a 100644 (file)
@@ -351,7 +351,7 @@ struct cxl_afu {
        struct device *chardev_s, *chardev_m, *chardev_d;
        struct idr contexts_idr;
        struct dentry *debugfs;
-       spinlock_t contexts_lock;
+       struct mutex contexts_lock;
        struct mutex spa_mutex;
        spinlock_t afu_cntl_lock;
 
@@ -398,6 +398,10 @@ struct cxl_context {
        phys_addr_t psn_phys;
        u64 psn_size;
 
+       /* Used to unmap any mmaps when force detaching */
+       struct address_space *mapping;
+       struct mutex mapping_lock;
+
        spinlock_t sste_lock; /* Protects segment table entries */
        struct cxl_sste *sstp;
        u64 sstp0, sstp1;
@@ -599,7 +603,8 @@ int cxl_alloc_sst(struct cxl_context *ctx);
 void init_cxl_native(void);
 
 struct cxl_context *cxl_context_alloc(void);
-int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master);
+int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
+                    struct address_space *mapping);
 void cxl_context_free(struct cxl_context *ctx);
 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma);
 
index 378b099e7c0b9681b6bb8105d2b7b0e6d74babea..e9f2f10dbb3734f3de4df60cdaed583415cfd831 100644 (file)
@@ -77,7 +77,7 @@ static int __afu_open(struct inode *inode, struct file *file, bool master)
                goto err_put_afu;
        }
 
-       if ((rc = cxl_context_init(ctx, afu, master)))
+       if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping)))
                goto err_put_afu;
 
        pr_devel("afu_open pe: %i\n", ctx->pe);
@@ -113,6 +113,10 @@ static int afu_release(struct inode *inode, struct file *file)
                 __func__, ctx->pe);
        cxl_context_detach(ctx);
 
+       mutex_lock(&ctx->mapping_lock);
+       ctx->mapping = NULL;
+       mutex_unlock(&ctx->mapping_lock);
+
        put_device(&ctx->afu->dev);
 
        /*
index 9a5a442269a870a7e22f699b77c5e041212b6bf1..f2b37b41a0da6e0b41fc7cbc8d1b02471ecf7fe7 100644 (file)
@@ -277,6 +277,7 @@ static int do_process_element_cmd(struct cxl_context *ctx,
                                  u64 cmd, u64 pe_state)
 {
        u64 state;
+       unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
 
        WARN_ON(!ctx->afu->enabled);
 
@@ -286,6 +287,10 @@ static int do_process_element_cmd(struct cxl_context *ctx,
        smp_mb();
        cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
        while (1) {
+               if (time_after_eq(jiffies, timeout)) {
+                       dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
+                       return -EBUSY;
+               }
                state = be64_to_cpup(ctx->afu->sw_command_status);
                if (state == ~0ULL) {
                        pr_err("cxl: Error adding process element to AFU\n");
@@ -610,13 +615,6 @@ static inline int detach_process_native_dedicated(struct cxl_context *ctx)
        return 0;
 }
 
-/*
- * TODO: handle case when this is called inside a rcu_read_lock() which may
- * happen when we unbind the driver (ie. cxl_context_detach_all()) .  Terminate
- * & remove use a mutex lock and schedule which will not good with lock held.
- * May need to write do_process_element_cmd() that handles outstanding page
- * faults synchronously.
- */
 static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
 {
        if (!ctx->pe_inserted)
index 10c98ab7f46e1c7fb6a796d4dde06d194fe97204..0f2cc9f8b4dbcd456bbccfab21bf3124f660e6be 100644 (file)
@@ -502,7 +502,7 @@ static struct cxl_afu *cxl_alloc_afu(struct cxl *adapter, int slice)
        afu->dev.release = cxl_release_afu;
        afu->slice = slice;
        idr_init(&afu->contexts_idr);
-       spin_lock_init(&afu->contexts_lock);
+       mutex_init(&afu->contexts_lock);
        spin_lock_init(&afu->afu_cntl_lock);
        mutex_init(&afu->spa_mutex);
 
index ce7ec06d87d13d6f0cadce4d254a715e6d2810fc..461bdbd5d48317fa941ac002aa1f014761336f9a 100644 (file)
@@ -121,7 +121,7 @@ static ssize_t reset_store_afu(struct device *device,
        int rc;
 
        /* Not safe to reset if it is currently in use */
-       spin_lock(&afu->contexts_lock);
+       mutex_lock(&afu->contexts_lock);
        if (!idr_is_empty(&afu->contexts_idr)) {
                rc = -EBUSY;
                goto err;
@@ -132,7 +132,7 @@ static ssize_t reset_store_afu(struct device *device,
 
        rc = count;
 err:
-       spin_unlock(&afu->contexts_lock);
+       mutex_unlock(&afu->contexts_lock);
        return rc;
 }
 
@@ -247,7 +247,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
        int rc = -EBUSY;
 
        /* can't change this if we have a user */
-       spin_lock(&afu->contexts_lock);
+       mutex_lock(&afu->contexts_lock);
        if (!idr_is_empty(&afu->contexts_idr))
                goto err;
 
@@ -271,7 +271,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
        afu->current_mode = 0;
        afu->num_procs = 0;
 
-       spin_unlock(&afu->contexts_lock);
+       mutex_unlock(&afu->contexts_lock);
 
        if ((rc = _cxl_afu_deactivate_mode(afu, old_mode)))
                return rc;
@@ -280,7 +280,7 @@ static ssize_t mode_store(struct device *device, struct device_attribute *attr,
 
        return count;
 err:
-       spin_unlock(&afu->contexts_lock);
+       mutex_unlock(&afu->contexts_lock);
        return rc;
 }
 
index 02ad79229f65ecf0b50b9bf29a1f63c0d297910e..7466ce098e60a086e4f60c5111c19c1b85cd6d60 100644 (file)
@@ -886,7 +886,7 @@ static int mmc_select_bus_width(struct mmc_card *card)
        unsigned idx, bus_width = 0;
        int err = 0;
 
-       if (!mmc_can_ext_csd(card) &&
+       if (!mmc_can_ext_csd(card) ||
            !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
                return 0;
 
index cced84233ac0f9e11f0b7a3ddb5dad39817325a9..7a8f1c5e65af19fce7bd7460fcc59cad5df2e905 100644 (file)
@@ -67,7 +67,7 @@ config XEN_PCIDEV_FRONTEND
 config HT_IRQ
        bool "Interrupts on hypertransport devices"
        default y
-       depends on PCI && X86_LOCAL_APIC && X86_IO_APIC
+       depends on PCI && X86_LOCAL_APIC
        help
           This allows native hypertransport devices to use interrupts.
 
@@ -110,13 +110,6 @@ config PCI_PASID
 
          If unsure, say N.
 
-config PCI_IOAPIC
-       bool "PCI IO-APIC hotplug support" if X86
-       depends on PCI
-       depends on ACPI
-       depends on X86_IO_APIC
-       default !X86
-
 config PCI_LABEL
        def_bool y if (DMI || ACPI)
        select NLS
index e04fe2d9df3b0e2f11ac8a11968bbb1cc629b322..73e4af400a5a6bc2d4985c1550228ecbf90b45ab 100644 (file)
@@ -13,8 +13,6 @@ obj-$(CONFIG_PCI_QUIRKS) += quirks.o
 # Build PCI Express stuff if needed
 obj-$(CONFIG_PCIEPORTBUS) += pcie/
 
-obj-$(CONFIG_PCI_IOAPIC) += ioapic.o
-
 # Build the PCI Hotplug drivers if we were asked to
 obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
 ifdef CONFIG_HOTPLUG_PCI
index 3efaf4c38528adc35fc693240d5b4ddb52c6d5cf..96c5c729cdbcb9069ece7a12c03157902538caef 100644 (file)
@@ -36,6 +36,7 @@
 #include <linux/wait.h>
 #include "../pci.h"
 #include <asm/pci_x86.h>               /* for struct irq_routing_table */
+#include <asm/io_apic.h>
 #include "ibmphp.h"
 
 #define attn_on(sl)  ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNON)
@@ -155,13 +156,10 @@ int ibmphp_init_devno(struct slot **cur_slot)
        for (loop = 0; loop < len; loop++) {
                if ((*cur_slot)->number == rtable->slots[loop].slot &&
                    (*cur_slot)->bus == rtable->slots[loop].bus) {
-                       struct io_apic_irq_attr irq_attr;
-
                        (*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn);
                        for (i = 0; i < 4; i++)
                                (*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus,
-                                               (int) (*cur_slot)->device, i,
-                                               &irq_attr);
+                                               (int) (*cur_slot)->device, i);
 
                        debug("(*cur_slot)->irq[0] = %x\n",
                                        (*cur_slot)->irq[0]);
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
deleted file mode 100644 (file)
index f6219d3..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * IOAPIC/IOxAPIC/IOSAPIC driver
- *
- * Copyright (C) 2009 Fujitsu Limited.
- * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * This driver manages PCI I/O APICs added by hotplug after boot.  We try to
- * claim all I/O APIC PCI devices, but those present at boot were registered
- * when we parsed the ACPI MADT, so we'll fail when we try to re-register
- * them.
- */
-
-#include <linux/pci.h>
-#include <linux/module.h>
-#include <linux/acpi.h>
-#include <linux/slab.h>
-
-struct ioapic {
-       acpi_handle     handle;
-       u32             gsi_base;
-};
-
-static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
-{
-       acpi_handle handle;
-       acpi_status status;
-       unsigned long long gsb;
-       struct ioapic *ioapic;
-       int ret;
-       char *type;
-       struct resource *res;
-
-       handle = ACPI_HANDLE(&dev->dev);
-       if (!handle)
-               return -EINVAL;
-
-       status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb);
-       if (ACPI_FAILURE(status))
-               return -EINVAL;
-
-       /*
-        * The previous code in acpiphp evaluated _MAT if _GSB failed, but
-        * ACPI spec 4.0 sec 6.2.2 requires _GSB for hot-pluggable I/O APICs.
-        */
-
-       ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL);
-       if (!ioapic)
-               return -ENOMEM;
-
-       ioapic->handle = handle;
-       ioapic->gsi_base = (u32) gsb;
-
-       if (dev->class == PCI_CLASS_SYSTEM_PIC_IOAPIC)
-               type = "IOAPIC";
-       else
-               type = "IOxAPIC";
-
-       ret = pci_enable_device(dev);
-       if (ret < 0)
-               goto exit_free;
-
-       pci_set_master(dev);
-
-       if (pci_request_region(dev, 0, type))
-               goto exit_disable;
-
-       res = &dev->resource[0];
-       if (acpi_register_ioapic(ioapic->handle, res->start, ioapic->gsi_base))
-               goto exit_release;
-
-       pci_set_drvdata(dev, ioapic);
-       dev_info(&dev->dev, "%s at %pR, GSI %u\n", type, res, ioapic->gsi_base);
-       return 0;
-
-exit_release:
-       pci_release_region(dev, 0);
-exit_disable:
-       pci_disable_device(dev);
-exit_free:
-       kfree(ioapic);
-       return -ENODEV;
-}
-
-static void ioapic_remove(struct pci_dev *dev)
-{
-       struct ioapic *ioapic = pci_get_drvdata(dev);
-
-       acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base);
-       pci_release_region(dev, 0);
-       pci_disable_device(dev);
-       kfree(ioapic);
-}
-
-
-static const struct pci_device_id ioapic_devices[] = {
-       { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOAPIC, ~0) },
-       { PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOXAPIC, ~0) },
-       { }
-};
-MODULE_DEVICE_TABLE(pci, ioapic_devices);
-
-static struct pci_driver ioapic_driver = {
-       .name           = "ioapic",
-       .id_table       = ioapic_devices,
-       .probe          = ioapic_probe,
-       .remove         = ioapic_remove,
-};
-
-static int __init ioapic_init(void)
-{
-       return pci_register_driver(&ioapic_driver);
-}
-module_init(ioapic_init);
-
-MODULE_LICENSE("GPL");
index c71443c4f265780b1fe7fa001e0a3fd6101a4886..97b5e4ee1ca40ae4bc5b50ab7e413833de693af7 100644 (file)
@@ -1041,6 +1041,7 @@ static const struct x86_cpu_id rapl_ids[] = {
        RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
        RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
        RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
+       RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
        RAPL_CPU(0x5A, rapl_defaults_atom),/* Annidale */
        {}
 };
index c1444c3d84c2823a8b4eaed28e9bf5036290ce26..2809ae0d6bcd9848bd15cbc8d4d45e69df1601a3 100644 (file)
@@ -570,7 +570,7 @@ static struct regulator_ops s2mps14_reg_ops = {
        .enable_mask    = S2MPS14_ENABLE_MASK           \
 }
 
-#define regulator_desc_s2mps14_buck(num, min, step) {          \
+#define regulator_desc_s2mps14_buck(num, min, step, min_sel) { \
        .name           = "BUCK"#num,                           \
        .id             = S2MPS14_BUCK##num,                    \
        .ops            = &s2mps14_reg_ops,                     \
@@ -579,7 +579,7 @@ static struct regulator_ops s2mps14_reg_ops = {
        .min_uV         = min,                                  \
        .uV_step        = step,                                 \
        .n_voltages     = S2MPS14_BUCK_N_VOLTAGES,              \
-       .linear_min_sel = S2MPS14_BUCK1235_START_SEL,           \
+       .linear_min_sel = min_sel,                              \
        .ramp_delay     = S2MPS14_BUCK_RAMP_DELAY,              \
        .vsel_reg       = S2MPS14_REG_B1CTRL2 + (num - 1) * 2,  \
        .vsel_mask      = S2MPS14_BUCK_VSEL_MASK,               \
@@ -613,11 +613,16 @@ static const struct regulator_desc s2mps14_regulators[] = {
        regulator_desc_s2mps14_ldo(23, MIN_800_MV, STEP_25_MV),
        regulator_desc_s2mps14_ldo(24, MIN_1800_MV, STEP_25_MV),
        regulator_desc_s2mps14_ldo(25, MIN_1800_MV, STEP_25_MV),
-       regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV),
-       regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV),
-       regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV),
-       regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV),
-       regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV),
+       regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV,
+                                   S2MPS14_BUCK1235_START_SEL),
+       regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV,
+                                   S2MPS14_BUCK1235_START_SEL),
+       regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV,
+                                   S2MPS14_BUCK1235_START_SEL),
+       regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV,
+                                   S2MPS14_BUCK4_START_SEL),
+       regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV,
+                                   S2MPS14_BUCK1235_START_SEL),
 };
 
 static int s2mps14_pmic_enable_ext_control(struct s2mps11_info *s2mps11,
index aa915da2a5e53c6a094b591e9effce404e813b21..82abfce1cb42b2b07c64b081beffd7aa5a538849 100644 (file)
@@ -176,7 +176,6 @@ STATIC int NCR_700_slave_alloc(struct scsi_device *SDpnt);
 STATIC int NCR_700_slave_configure(struct scsi_device *SDpnt);
 STATIC void NCR_700_slave_destroy(struct scsi_device *SDpnt);
 static int NCR_700_change_queue_depth(struct scsi_device *SDpnt, int depth);
-static int NCR_700_change_queue_type(struct scsi_device *SDpnt, int depth);
 
 STATIC struct device_attribute *NCR_700_dev_attrs[];
 
@@ -326,7 +325,6 @@ NCR_700_detect(struct scsi_host_template *tpnt,
        tpnt->slave_destroy = NCR_700_slave_destroy;
        tpnt->slave_alloc = NCR_700_slave_alloc;
        tpnt->change_queue_depth = NCR_700_change_queue_depth;
-       tpnt->change_queue_type = NCR_700_change_queue_type;
        tpnt->use_blk_tags = 1;
 
        if(tpnt->name == NULL)
@@ -904,8 +902,8 @@ process_message(struct Scsi_Host *host,     struct NCR_700_Host_Parameters *hostdata
                        hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
 
                        SCp->device->tagged_supported = 0;
+                       SCp->device->simple_tags = 0;
                        scsi_change_queue_depth(SCp->device, host->cmd_per_lun);
-                       scsi_set_tag_type(SCp->device, 0);
                } else {
                        shost_printk(KERN_WARNING, host,
                                "(%d:%d) Unexpected REJECT Message %s\n",
@@ -1818,8 +1816,8 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
                hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));
        }
 
-       if((hostdata->tag_negotiated &(1<<scmd_id(SCp)))
-          && scsi_get_tag_type(SCp->device)) {
+       if ((hostdata->tag_negotiated & (1<<scmd_id(SCp))) &&
+           SCp->device->simple_tags) {
                slot->tag = SCp->request->tag;
                CDEBUG(KERN_DEBUG, SCp, "sending out tag %d, slot %p\n",
                       slot->tag, slot);
@@ -2082,39 +2080,6 @@ NCR_700_change_queue_depth(struct scsi_device *SDp, int depth)
        return scsi_change_queue_depth(SDp, depth);
 }
 
-static int NCR_700_change_queue_type(struct scsi_device *SDp, int tag_type)
-{
-       int change_tag = ((tag_type ==0 &&  scsi_get_tag_type(SDp) != 0)
-                         || (tag_type != 0 && scsi_get_tag_type(SDp) == 0));
-       struct NCR_700_Host_Parameters *hostdata = 
-               (struct NCR_700_Host_Parameters *)SDp->host->hostdata[0];
-
-       /* We have a global (per target) flag to track whether TCQ is
-        * enabled, so we'll be turning it off for the entire target here.
-        * our tag algorithm will fail if we mix tagged and untagged commands,
-        * so quiesce the device before doing this */
-       if (change_tag)
-               scsi_target_quiesce(SDp->sdev_target);
-
-       scsi_set_tag_type(SDp, tag_type);
-       if (!tag_type) {
-               /* shift back to the default unqueued number of commands
-                * (the user can still raise this) */
-               scsi_change_queue_depth(SDp, SDp->host->cmd_per_lun);
-               hostdata->tag_negotiated &= ~(1 << sdev_id(SDp));
-       } else {
-               /* Here, we cleared the negotiation flag above, so this
-                * will force the driver to renegotiate */
-               scsi_change_queue_depth(SDp, SDp->queue_depth);
-               if (change_tag)
-                       NCR_700_set_tag_neg_state(SDp, NCR_700_START_TAG_NEGOTIATION);
-       }
-       if (change_tag)
-               scsi_target_resume(SDp->sdev_target);
-
-       return tag_type;
-}
-
 static ssize_t
 NCR_700_show_active_tags(struct device *dev, struct device_attribute *attr, char *buf)
 {
index 86cf3d671eb99bdae4b9eb9baa75e451c852f6ff..9c92f415229f17220f789f1ce1c76179b946b7af 100644 (file)
@@ -1462,18 +1462,17 @@ config SCSI_WD719X
          SCSI controllers (based on WD33C296A chip).
 
 config SCSI_DEBUG
-       tristate "SCSI debugging host simulator"
+       tristate "SCSI debugging host and device simulator"
        depends on SCSI
        select CRC_T10DIF
        help
-         This is a host adapter simulator that can simulate multiple hosts
-         each with multiple dummy SCSI devices (disks). It defaults to one
-         host adapter with one dummy SCSI disk. Each dummy disk uses kernel
-         RAM as storage (i.e. it is a ramdisk). To save space when multiple
-         dummy disks are simulated, they share the same kernel RAM for 
-         their storage. See <http://sg.danny.cz/sg/sdebug26.html> for more
-         information. This driver is primarily of use to those testing the
-         SCSI and block subsystems. If unsure, say N.
+         This pseudo driver simulates one or more hosts (SCSI initiators),
+         each with one or more targets, each with one or more logical units.
+         Defaults to one of each, creating a small RAM disk device. Many
+         parameters found in the /sys/bus/pseudo/drivers/scsi_debug
+         directory can be tweaked at run time.
+         See <http://sg.danny.cz/sg/sdebug26.html> for more information.
+         Mainly used for testing and best as a module. If unsure, say N.
 
 config SCSI_MESH
        tristate "MESH (Power Mac internal SCSI) support"
index 6719a3390ebd7ad9781a52552616859bf836b2ab..2c5ce48c8f956127e118046aa1a248cc0adc132e 100644 (file)
@@ -7921,9 +7921,9 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
         */
        if ((asc_dvc->cur_dvc_qng[scp->device->id] > 0) &&
            (boardp->reqcnt[scp->device->id] % 255) == 0) {
-               asc_scsi_q->q2.tag_code = MSG_ORDERED_TAG;
+               asc_scsi_q->q2.tag_code = ORDERED_QUEUE_TAG;
        } else {
-               asc_scsi_q->q2.tag_code = MSG_SIMPLE_TAG;
+               asc_scsi_q->q2.tag_code = SIMPLE_QUEUE_TAG;
        }
 
        /* Build ASC_SCSI_Q */
@@ -8351,7 +8351,7 @@ static int AscPutReadyQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq, uchar q_no)
        }
        q_addr = ASC_QNO_TO_QADDR(q_no);
        if ((scsiq->q1.target_id & asc_dvc->use_tagged_qng) == 0) {
-               scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG;
+               scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG;
        }
        scsiq->q1.status = QS_FREE;
        AscMemWordCopyPtrToLram(iop_base,
@@ -8669,7 +8669,7 @@ static int AscExeScsiQueue(ASC_DVC_VAR *asc_dvc, ASC_SCSI_Q *scsiq)
                }
        }
        if (disable_syn_offset_one_fix) {
-               scsiq->q2.tag_code &= ~MSG_SIMPLE_TAG;
+               scsiq->q2.tag_code &= ~SIMPLE_QUEUE_TAG;
                scsiq->q2.tag_code |= (ASC_TAG_FLAG_DISABLE_ASYN_USE_SYN_FIX |
                                       ASC_TAG_FLAG_DISABLE_DISCONNECT);
        } else {
index 14fc018436c235cc3d2267fb889d4831f4dc60ce..02a2512b76a8ee540e0454f5384f37afbcf20dde 100644 (file)
@@ -63,7 +63,6 @@ static struct scsi_host_template aic94xx_sht = {
        .scan_finished          = asd_scan_finished,
        .scan_start             = asd_scan_start,
        .change_queue_depth     = sas_change_queue_depth,
-       .change_queue_type      = sas_change_queue_type,
        .bios_param             = sas_bios_param,
        .can_queue              = 1,
        .cmd_per_lun            = 1,
index e861f286b42e494339eb46d5e58510911f7d43c8..98d06d15195806cd915a7ccf23f19447e27854d3 100644 (file)
@@ -2792,7 +2792,6 @@ static struct scsi_host_template bnx2fc_shost_template = {
        .eh_host_reset_handler  = fc_eh_host_reset,
        .slave_alloc            = fc_slave_alloc,
        .change_queue_depth     = scsi_change_queue_depth,
-       .change_queue_type      = scsi_change_queue_type,
        .this_id                = -1,
        .cmd_per_lun            = 3,
        .use_clustering         = ENABLE_CLUSTERING,
index 4b56858c1df29f17fb5867158ec50eb683ae8b44..9ecca8504f60e64084e1e55487602011e7386e30 100644 (file)
@@ -1737,11 +1737,7 @@ void bnx2fc_build_fcp_cmnd(struct bnx2fc_cmd *io_req,
        fcp_cmnd->fc_pri_ta = 0;
        fcp_cmnd->fc_tm_flags = io_req->mp_req.tm_flags;
        fcp_cmnd->fc_flags = io_req->io_req_flags;
-
-       if (sc_cmd->flags & SCMD_TAGGED)
-               fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
-       else
-               fcp_cmnd->fc_pri_ta = 0;
+       fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
 }
 
 static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req,
index 51ea5dc5f0848d7db70af3fdf6f6c6eaf63f1311..3987284e0d2abcdc98e5158c41245f7d5a76045c 100644 (file)
@@ -172,10 +172,7 @@ csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
                fcp_cmnd->fc_cmdref = 0;
 
                memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
-               if (scmnd->flags & SCMD_TAGGED)
-                       fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
-               else
-                       fcp_cmnd->fc_pri_ta = 0;
+               fcp_cmnd->fc_pri_ta = FCP_PTA_SIMPLE;
                fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));
 
                if (req->nsge)
index b7dc59fca7a633c950d9760a82266a9feea376da..7bd376d95ed5225e0490ce4939775aa609eea74f 100644 (file)
@@ -684,9 +684,9 @@ static u16 calc_fi_checksum(struct esas2r_flash_context *fc)
  *              1)  verify the fi_version is correct
  *              2)  verify the checksum of the entire image.
  *              3)  validate the adap_typ, action and length fields.
- *              4)  valdiate each component header. check the img_type and
+ *              4)  validate each component header. check the img_type and
  *                  length fields
- *              5)  valdiate each component image.  validate signatures and
+ *              5)  validate each component image.  validate signatures and
  *                  local checksums
  */
 static bool verify_fi(struct esas2r_adapter *a,
index 593ff8a63c706a85e8ca61df243160244cccf991..7e1c21e6736b7aef9a2ba5cebc308298b7100e2b 100644 (file)
@@ -255,7 +255,6 @@ static struct scsi_host_template driver_template = {
        .emulated                       = 0,
        .proc_name                      = ESAS2R_DRVR_NAME,
        .change_queue_depth             = scsi_change_queue_depth,
-       .change_queue_type              = scsi_change_queue_type,
        .max_sectors                    = 0xFFFF,
        .use_blk_tags                   = 1,
 };
index cd00a6cdf55b7e19eaf38c4c65e289f5744594b3..ec193a8357d70cdf3cbc0354bb7a10b92f7e3372 100644 (file)
@@ -281,7 +281,6 @@ static struct scsi_host_template fcoe_shost_template = {
        .eh_host_reset_handler = fc_eh_host_reset,
        .slave_alloc = fc_slave_alloc,
        .change_queue_depth = scsi_change_queue_depth,
-       .change_queue_type = scsi_change_queue_type,
        .this_id = -1,
        .cmd_per_lun = 3,
        .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
index 0c1f8177b5b72abf8f0c4d3d0b587dbb85a85226..8a0d4d7b3254d715c32cfcc41bb0fc66dd221221 100644 (file)
@@ -111,7 +111,6 @@ static struct scsi_host_template fnic_host_template = {
        .eh_host_reset_handler = fnic_host_reset,
        .slave_alloc = fnic_slave_alloc,
        .change_queue_depth = scsi_change_queue_depth,
-       .change_queue_type = scsi_change_queue_type,
        .this_id = -1,
        .cmd_per_lun = 3,
        .can_queue = FNIC_DFLT_IO_REQ,
index f58c6d8e02644fcae6d2b82d0e6858c24faa3664..057d27721d5b24d23e24c3040b64507507556400 100644 (file)
@@ -1615,7 +1615,6 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
        struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
        struct ibmvfc_cmd *vfc_cmd;
        struct ibmvfc_event *evt;
-       u8 tag[2];
        int rc;
 
        if (unlikely((rc = fc_remote_port_chkready(rport))) ||
@@ -3089,7 +3088,6 @@ static struct scsi_host_template driver_template = {
        .target_alloc = ibmvfc_target_alloc,
        .scan_finished = ibmvfc_scan_finished,
        .change_queue_depth = ibmvfc_change_queue_depth,
-       .change_queue_type = scsi_change_queue_type,
        .cmd_per_lun = 16,
        .can_queue = IBMVFC_MAX_REQUESTS_DEFAULT,
        .this_id = -1,
index 5402943893559cb47d3f16a33ebf7db294ee2b74..df4e27cd996a3c68cf095e40d7a6ccf7ebf6a065 100644 (file)
@@ -1426,16 +1426,14 @@ static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
                if (res->sdev) {
                        res->del_from_ml = 1;
                        res->res_handle = IPR_INVALID_RES_HANDLE;
-                       if (ioa_cfg->allow_ml_add_del)
-                               schedule_work(&ioa_cfg->work_q);
+                       schedule_work(&ioa_cfg->work_q);
                } else {
                        ipr_clear_res_target(res);
                        list_move_tail(&res->queue, &ioa_cfg->free_res_q);
                }
        } else if (!res->sdev || res->del_from_ml) {
                res->add_to_ml = 1;
-               if (ioa_cfg->allow_ml_add_del)
-                       schedule_work(&ioa_cfg->work_q);
+               schedule_work(&ioa_cfg->work_q);
        }
 
        ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
@@ -3273,8 +3271,7 @@ static void ipr_worker_thread(struct work_struct *work)
 restart:
        do {
                did_work = 0;
-               if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
-                   !ioa_cfg->allow_ml_add_del) {
+               if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
                        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                        return;
                }
@@ -3311,6 +3308,7 @@ restart:
                }
        }
 
+       ioa_cfg->scan_done = 1;
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
        kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
        LEAVE;
@@ -4345,30 +4343,6 @@ static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
        return sdev->queue_depth;
 }
 
-/**
- * ipr_change_queue_type - Change the device's queue type
- * @dsev:              scsi device struct
- * @tag_type:  type of tags to use
- *
- * Return value:
- *     actual queue type set
- **/
-static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
-{
-       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
-       struct ipr_resource_entry *res;
-       unsigned long lock_flags = 0;
-
-       spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-       res = (struct ipr_resource_entry *)sdev->hostdata;
-       if (res && ipr_is_gscsi(res))
-               tag_type = scsi_change_queue_type(sdev, tag_type);
-       else
-               tag_type = 0;
-       spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-       return tag_type;
-}
-
 /**
  * ipr_show_adapter_handle - Show the adapter's resource handle for this device
  * @dev:       device struct
@@ -4739,6 +4713,7 @@ static int ipr_slave_configure(struct scsi_device *sdev)
                        sdev->no_uld_attach = 1;
                }
                if (ipr_is_vset_device(res)) {
+                       sdev->scsi_level = SCSI_SPC_3;
                        blk_queue_rq_timeout(sdev->request_queue,
                                             IPR_VSET_RW_TIMEOUT);
                        blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
@@ -5231,6 +5206,28 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
  * @scsi_cmd:  scsi command struct
  *
  * Return value:
+ *     0 if scan in progress / 1 if scan is complete
+ **/
+static int ipr_scan_finished(struct Scsi_Host *shost, unsigned long elapsed_time)
+{
+       unsigned long lock_flags;
+       struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
+       int rc = 0;
+
+       spin_lock_irqsave(shost->host_lock, lock_flags);
+       if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead || ioa_cfg->scan_done)
+               rc = 1;
+       if ((elapsed_time/HZ) > (ioa_cfg->transop_timeout * 2))
+               rc = 1;
+       spin_unlock_irqrestore(shost->host_lock, lock_flags);
+       return rc;
+}
+
+/**
+ * ipr_eh_host_reset - Reset the host adapter
+ * @scsi_cmd:  scsi command struct
+ *
+ * Return value:
  *     SUCCESS / FAILED
  **/
 static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
@@ -5779,7 +5776,7 @@ static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
 
        ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
 
-       if (!scsi_get_tag_type(scsi_cmd->device)) {
+       if (!scsi_cmd->device->simple_tags) {
                ipr_erp_request_sense(ipr_cmd);
                return;
        }
@@ -6299,10 +6296,10 @@ static struct scsi_host_template driver_template = {
        .slave_alloc = ipr_slave_alloc,
        .slave_configure = ipr_slave_configure,
        .slave_destroy = ipr_slave_destroy,
+       .scan_finished = ipr_scan_finished,
        .target_alloc = ipr_target_alloc,
        .target_destroy = ipr_target_destroy,
        .change_queue_depth = ipr_change_queue_depth,
-       .change_queue_type = ipr_change_queue_type,
        .bios_param = ipr_biosparam,
        .can_queue = IPR_MAX_COMMANDS,
        .this_id = -1,
@@ -6841,7 +6838,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
        ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
 
        list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
-               if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
+               if (res->add_to_ml || res->del_from_ml) {
                        ipr_trace;
                        break;
                }
@@ -6870,6 +6867,7 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
        if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
                scsi_block_requests(ioa_cfg->host);
 
+       schedule_work(&ioa_cfg->work_q);
        LEAVE;
        return IPR_RC_JOB_RETURN;
 }
@@ -7610,6 +7608,19 @@ static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
        type[4] = '\0';
        ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
 
+       if (ipr_invalid_adapter(ioa_cfg)) {
+               dev_err(&ioa_cfg->pdev->dev,
+                       "Adapter not supported in this hardware configuration.\n");
+
+               if (!ipr_testmode) {
+                       ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
+                       ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+                       list_add_tail(&ipr_cmd->queue,
+                                       &ioa_cfg->hrrq->hrrq_free_q);
+                       return IPR_RC_JOB_RETURN;
+               }
+       }
+
        ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
 
        ipr_ioafp_inquiry(ipr_cmd, 1, 0,
@@ -8797,20 +8808,6 @@ static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
                _ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
                                        IPR_SHUTDOWN_NONE);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
-       wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
-       spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
-
-       if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
-               rc = -EIO;
-       } else if (ipr_invalid_adapter(ioa_cfg)) {
-               if (!ipr_testmode)
-                       rc = -EIO;
-
-               dev_err(&ioa_cfg->pdev->dev,
-                       "Adapter not supported in this hardware configuration.\n");
-       }
-
-       spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
 
        LEAVE;
        return rc;
@@ -9264,7 +9261,7 @@ static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
                                               * ioa_cfg->max_devs_supported)));
        }
 
-       host->max_channel = IPR_MAX_BUS_TO_SCAN;
+       host->max_channel = IPR_VSET_BUS;
        host->unique_id = host->host_no;
        host->max_cmd_len = IPR_MAX_CDB_LEN;
        host->can_queue = ioa_cfg->max_cmds;
@@ -9763,25 +9760,6 @@ out_scsi_host_put:
        goto out;
 }
 
-/**
- * ipr_scan_vsets - Scans for VSET devices
- * @ioa_cfg:   ioa config struct
- *
- * Description: Since the VSET resources do not follow SAM in that we can have
- * sparse LUNs with no LUN 0, we have to scan for these ourselves.
- *
- * Return value:
- *     none
- **/
-static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
-{
-       int target, lun;
-
-       for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
-               for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
-                       scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
-}
-
 /**
  * ipr_initiate_ioa_bringdown - Bring down an adapter
  * @ioa_cfg:           ioa config struct
@@ -9937,10 +9915,6 @@ static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
        }
 
        scsi_scan_host(ioa_cfg->host);
-       ipr_scan_vsets(ioa_cfg);
-       scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
-       ioa_cfg->allow_ml_add_del = 1;
-       ioa_cfg->host->max_channel = IPR_VSET_BUS;
        ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
 
        if (ioa_cfg->iopoll_weight && ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
index 9ebdebd944e7b3cf94a970ab41117b6e8aacadbe..b4f3eec51bc9b19783f931116f63aa70bc2d235a 100644 (file)
 
 #define IPR_MAX_NUM_TARGETS_PER_BUS                    256
 #define IPR_MAX_NUM_LUNS_PER_TARGET                    256
-#define IPR_MAX_NUM_VSET_LUNS_PER_TARGET       8
 #define IPR_VSET_BUS                                   0xff
 #define IPR_IOA_BUS                                            0xff
 #define IPR_IOA_TARGET                                 0xff
 #define IPR_IOA_LUN                                            0xff
 #define IPR_MAX_NUM_BUSES                              16
-#define IPR_MAX_BUS_TO_SCAN                            IPR_MAX_NUM_BUSES
 
 #define IPR_NUM_RESET_RELOAD_RETRIES           3
 
@@ -1453,7 +1451,7 @@ struct ipr_ioa_cfg {
        u8 in_ioa_bringdown:1;
        u8 ioa_unit_checked:1;
        u8 dump_taken:1;
-       u8 allow_ml_add_del:1;
+       u8 scan_done:1;
        u8 needs_hard_reset:1;
        u8 dual_raid:1;
        u8 needs_warm_reset:1;
index 724c6265b667a559c762c5b930caa60c6558f962..cd41b63a2f10f3c943b3463efd9e633999708501 100644 (file)
@@ -158,7 +158,6 @@ static struct scsi_host_template isci_sht = {
        .scan_finished                  = isci_host_scan_finished,
        .scan_start                     = isci_host_start,
        .change_queue_depth             = sas_change_queue_depth,
-       .change_queue_type              = sas_change_queue_type,
        .bios_param                     = sas_bios_param,
        .can_queue                      = ISCI_CAN_QUEUE_VAL,
        .cmd_per_lun                    = 1,
index 72918d227ead30811a0b43f95d6942f7b7d191fe..519dac4e341e3d05af47d35fd2580a8a8ecc2c67 100644 (file)
@@ -906,13 +906,6 @@ int sas_change_queue_depth(struct scsi_device *sdev, int depth)
        return scsi_change_queue_depth(sdev, depth);
 }
 
-int sas_change_queue_type(struct scsi_device *scsi_dev, int type)
-{
-       if (dev_is_sata(sdev_to_domain_dev(scsi_dev)))
-               return -EINVAL;
-       return scsi_change_queue_type(scsi_dev, type);
-}
-
 int sas_bios_param(struct scsi_device *scsi_dev,
                          struct block_device *bdev,
                          sector_t capacity, int *hsc)
@@ -1011,7 +1004,6 @@ EXPORT_SYMBOL_GPL(sas_queuecommand);
 EXPORT_SYMBOL_GPL(sas_target_alloc);
 EXPORT_SYMBOL_GPL(sas_slave_configure);
 EXPORT_SYMBOL_GPL(sas_change_queue_depth);
-EXPORT_SYMBOL_GPL(sas_change_queue_type);
 EXPORT_SYMBOL_GPL(sas_bios_param);
 EXPORT_SYMBOL_GPL(sas_task_abort);
 EXPORT_SYMBOL_GPL(sas_phy_reset);
index fd85952b621d6e76d8db7d00682671d1458a2c78..4f9222eb22669b6a84f0e5cad891958e6e8c6e22 100644 (file)
@@ -5879,7 +5879,6 @@ struct scsi_host_template lpfc_template = {
        .max_sectors            = 0xFFFF,
        .vendor_id              = LPFC_NL_VENDOR_ID,
        .change_queue_depth     = scsi_change_queue_depth,
-       .change_queue_type      = scsi_change_queue_type,
        .use_blk_tags           = 1,
        .track_queue_depth      = 1,
 };
@@ -5904,7 +5903,6 @@ struct scsi_host_template lpfc_vport_template = {
        .shost_attrs            = lpfc_vport_attrs,
        .max_sectors            = 0xFFFF,
        .change_queue_depth     = scsi_change_queue_depth,
-       .change_queue_type      = scsi_change_queue_type,
        .use_blk_tags           = 1,
        .track_queue_depth      = 1,
 };
index 8431eb10bbb1fa4f89bdbfbbdb2e5a74b3c10086..6a1c036a6f3f085cabd6ad69838b64d59290507d 100644 (file)
@@ -7592,7 +7592,6 @@ static struct scsi_host_template scsih_driver_template = {
        .scan_finished                  = _scsih_scan_finished,
        .scan_start                     = _scsih_scan_start,
        .change_queue_depth             = _scsih_change_queue_depth,
-       .change_queue_type              = scsi_change_queue_type,
        .eh_abort_handler               = _scsih_abort,
        .eh_device_reset_handler        = _scsih_dev_reset,
        .eh_target_reset_handler        = _scsih_target_reset,
index 0d1d06488a28db1c97e98f4158bf967a637011bb..e689bf20a3ea541b93f94cb50cc900e5ade3a751 100644 (file)
@@ -1006,12 +1006,9 @@ mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
                    &mpt2sas_phy->remote_identify);
                _transport_add_phy_to_an_existing_port(ioc, sas_node,
                    mpt2sas_phy, mpt2sas_phy->remote_identify.sas_address);
-       } else {
+       } else
                memset(&mpt2sas_phy->remote_identify, 0 , sizeof(struct
                    sas_identify));
-               _transport_del_phy_from_an_existing_port(ioc, sas_node,
-                   mpt2sas_phy);
-       }
 
        if (mpt2sas_phy->phy)
                mpt2sas_phy->phy->negotiated_linkrate =
index a2b60991efd4f5651f0eab185156c08e3db1a26c..94261ee9e72dcc5896ed4807e1f86c32670126e3 100644 (file)
@@ -7229,7 +7229,6 @@ static struct scsi_host_template scsih_driver_template = {
        .scan_finished                  = _scsih_scan_finished,
        .scan_start                     = _scsih_scan_start,
        .change_queue_depth             = _scsih_change_queue_depth,
-       .change_queue_type              = scsi_change_queue_type,
        .eh_abort_handler               = _scsih_abort,
        .eh_device_reset_handler        = _scsih_dev_reset,
        .eh_target_reset_handler        = _scsih_target_reset,
index d4bafaaebea91496438d3f3fd584951644084be3..3637ae6c0171190f34c0050e6a1682ce5982d523 100644 (file)
@@ -1003,12 +1003,9 @@ mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
                    &mpt3sas_phy->remote_identify);
                _transport_add_phy_to_an_existing_port(ioc, sas_node,
                    mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
-       } else {
+       } else
                memset(&mpt3sas_phy->remote_identify, 0 , sizeof(struct
                    sas_identify));
-               _transport_del_phy_from_an_existing_port(ioc, sas_node,
-                   mpt3sas_phy);
-       }
 
        if (mpt3sas_phy->phy)
                mpt3sas_phy->phy->negotiated_linkrate =
index f15df3de6790eeea406dd5593f3e87e74c83957f..53030b0e8015d833719b276adad1d8898cb89780 100644 (file)
@@ -54,7 +54,6 @@ static struct scsi_host_template mvs_sht = {
        .scan_finished          = mvs_scan_finished,
        .scan_start             = mvs_scan_start,
        .change_queue_depth     = sas_change_queue_depth,
-       .change_queue_type      = sas_change_queue_type,
        .bios_param             = sas_bios_param,
        .can_queue              = 1,
        .cmd_per_lun            = 1,
index 329aba0083aba7f3556610a7d924241c218ae812..65555916d3b84e89fefa34817375276f0831342e 100644 (file)
@@ -76,7 +76,6 @@ static struct scsi_host_template pm8001_sht = {
        .scan_finished          = pm8001_scan_finished,
        .scan_start             = pm8001_scan_start,
        .change_queue_depth     = sas_change_queue_depth,
-       .change_queue_type      = sas_change_queue_type,
        .bios_param             = sas_bios_param,
        .can_queue              = 1,
        .cmd_per_lun            = 1,
index b1b1f66b1ab7817f739da1308f1cd92932c164dc..8c27b6a77ec4b1ae9c914cfe15eb05d7dd0b371a 100644 (file)
@@ -4251,7 +4251,6 @@ static struct scsi_host_template pmcraid_host_template = {
        .slave_configure = pmcraid_slave_configure,
        .slave_destroy = pmcraid_slave_destroy,
        .change_queue_depth = pmcraid_change_queue_depth,
-       .change_queue_type  = scsi_change_queue_type,
        .can_queue = PMCRAID_MAX_IO_CMD,
        .this_id = -1,
        .sg_tablesize = PMCRAID_MAX_IOADLS,
index a4dde7e80dbdb539c78367e3238d1352047b509b..e59f25bff7abbb49a9f2478b8e1cf59e9d8fe399 100644 (file)
@@ -3237,8 +3237,6 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
        struct fc_rport *rport;
        unsigned long flags;
 
-       qla2x00_rport_del(fcport);
-
        rport_ids.node_name = wwn_to_u64(fcport->node_name);
        rport_ids.port_name = wwn_to_u64(fcport->port_name);
        rport_ids.port_id = fcport->d_id.b.domain << 16 |
index 6b4d9235368a716b596b7af112312cdcaf7b494d..12ca291c1380845e45a40b0b4f5f4dd3356ab429 100644 (file)
@@ -258,7 +258,6 @@ struct scsi_host_template qla2xxx_driver_template = {
        .scan_finished          = qla2xxx_scan_finished,
        .scan_start             = qla2xxx_scan_start,
        .change_queue_depth     = scsi_change_queue_depth,
-       .change_queue_type      = scsi_change_queue_type,
        .this_id                = -1,
        .cmd_per_lun            = 3,
        .use_clustering         = ENABLE_CLUSTERING,
index a902fa1db7aff1a0fd3ca8622040b56f6e03cef1..57418258c1017631798b73d78bf73e2367ae8917 100644 (file)
@@ -3218,25 +3218,25 @@ static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
 
        switch (task_codes) {
        case ATIO_SIMPLE_QUEUE:
-               fcp_task_attr = MSG_SIMPLE_TAG;
+               fcp_task_attr = TCM_SIMPLE_TAG;
                break;
        case ATIO_HEAD_OF_QUEUE:
-               fcp_task_attr = MSG_HEAD_TAG;
+               fcp_task_attr = TCM_HEAD_TAG;
                break;
        case ATIO_ORDERED_QUEUE:
-               fcp_task_attr = MSG_ORDERED_TAG;
+               fcp_task_attr = TCM_ORDERED_TAG;
                break;
        case ATIO_ACA_QUEUE:
-               fcp_task_attr = MSG_ACA_TAG;
+               fcp_task_attr = TCM_ACA_TAG;
                break;
        case ATIO_UNTAGGED:
-               fcp_task_attr = MSG_SIMPLE_TAG;
+               fcp_task_attr = TCM_SIMPLE_TAG;
                break;
        default:
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
                    "qla_target: unknown task code %x, use ORDERED instead\n",
                    task_codes);
-               fcp_task_attr = MSG_ORDERED_TAG;
+               fcp_task_attr = TCM_ORDERED_TAG;
                break;
        }
 
index 1ad0c36375b8043646bf9bf4078acad9b2c96619..e02885451425dbd4af6272ce53d1b2b54f809a43 100644 (file)
@@ -739,33 +739,11 @@ int scsi_track_queue_full(struct scsi_device *sdev, int depth)
 
        if (sdev->last_queue_full_count <= 10)
                return 0;
-       if (sdev->last_queue_full_depth < 8) {
-               /* Drop back to untagged */
-               scsi_set_tag_type(sdev, 0);
-               scsi_change_queue_depth(sdev, sdev->host->cmd_per_lun);
-               return -1;
-       }
 
        return scsi_change_queue_depth(sdev, depth);
 }
 EXPORT_SYMBOL(scsi_track_queue_full);
 
-/**
- * scsi_change_queue_type() - Change a device's queue type
- * @sdev:     The SCSI device whose queue depth is to change
- * @tag_type: Identifier for queue type
- */
-int scsi_change_queue_type(struct scsi_device *sdev, int tag_type)
-{
-       if (!sdev->tagged_supported)
-               return 0;
-
-       scsi_set_tag_type(sdev, tag_type);
-       return tag_type;
-
-}
-EXPORT_SYMBOL(scsi_change_queue_type);
-
 /**
  * scsi_vpd_inquiry - Request a device provide us with a VPD page
  * @sdev: The device to ask
index aa4b6b80aadee31c85d9678d82160472e518a907..7b8b51bc29b4353debc9bd9911fcbf939753c782 100644 (file)
@@ -128,7 +128,6 @@ static const char *scsi_debug_version_date = "20141022";
 #define DEF_REMOVABLE false
 #define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
 #define DEF_SECTOR_SIZE 512
-#define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
 #define DEF_UNMAP_ALIGNMENT 0
 #define DEF_UNMAP_GRANULARITY 1
 #define DEF_UNMAP_MAX_BLOCKS 0xFFFFFFFF
@@ -817,6 +816,7 @@ static int check_readiness(struct scsi_cmnd *SCpnt, int uas_only,
                                        UA_CHANGED_ASC, CAPACITY_CHANGED_ASCQ);
                        if (debug)
                                cp = "capacity data changed";
+                       break;
                default:
                        pr_warn("%s: unexpected unit attention code=%d\n",
                                __func__, k);
@@ -3045,18 +3045,12 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
        u8 num;
        unsigned long iflags;
        int ret;
+       int retval = 0;
 
-       lba = get_unaligned_be32(cmd + 2);
+       lba = get_unaligned_be64(cmd + 2);
        num = cmd[13];          /* 1 to a maximum of 255 logical blocks */
        if (0 == num)
                return 0;       /* degenerate case, not an error */
-       dnum = 2 * num;
-       arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
-       if (NULL == arr) {
-               mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
-                               INSUFF_RES_ASCQ);
-               return check_condition_result;
-       }
        if (scsi_debug_dif == SD_DIF_TYPE2_PROTECTION &&
            (cmd[1] & 0xe0)) {
                mk_sense_invalid_opcode(scp);
@@ -3079,6 +3073,13 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
                mk_sense_buffer(scp, ILLEGAL_REQUEST, INVALID_FIELD_IN_CDB, 0);
                return check_condition_result;
        }
+       dnum = 2 * num;
+       arr = kzalloc(dnum * lb_size, GFP_ATOMIC);
+       if (NULL == arr) {
+               mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
+                               INSUFF_RES_ASCQ);
+               return check_condition_result;
+       }
 
        write_lock_irqsave(&atomic_rw, iflags);
 
@@ -3089,24 +3090,24 @@ resp_comp_write(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
        ret = do_device_access(scp, 0, dnum, true);
        fake_storep = fake_storep_hold;
        if (ret == -1) {
-               write_unlock_irqrestore(&atomic_rw, iflags);
-               kfree(arr);
-               return DID_ERROR << 16;
+               retval = DID_ERROR << 16;
+               goto cleanup;
        } else if ((ret < (dnum * lb_size)) &&
                 (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
                sdev_printk(KERN_INFO, scp->device, "%s: compare_write: cdb "
                            "indicated=%u, IO sent=%d bytes\n", my_name,
                            dnum * lb_size, ret);
        if (!comp_write_worker(lba, num, arr)) {
-               write_unlock_irqrestore(&atomic_rw, iflags);
-               kfree(arr);
                mk_sense_buffer(scp, MISCOMPARE, MISCOMPARE_VERIFY_ASC, 0);
-               return check_condition_result;
+               retval = check_condition_result;
+               goto cleanup;
        }
        if (scsi_debug_lbp())
                map_region(lba, num);
+cleanup:
        write_unlock_irqrestore(&atomic_rw, iflags);
-       return 0;
+       kfree(arr);
+       return retval;
 }
 
 struct unmap_block_desc {
@@ -4438,6 +4439,7 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
                        struct sdebug_host_info *sdhp;
                        struct sdebug_dev_info *dp;
 
+                       spin_lock(&sdebug_host_list_lock);
                        list_for_each_entry(sdhp, &sdebug_host_list,
                                            host_list) {
                                list_for_each_entry(dp, &sdhp->dev_info_list,
@@ -4446,6 +4448,7 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
                                                dp->uas_bm);
                                }
                        }
+                       spin_unlock(&sdebug_host_list_lock);
                }
                return count;
        }
@@ -4987,32 +4990,6 @@ sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
        return sdev->queue_depth;
 }
 
-static int
-sdebug_change_qtype(struct scsi_device *sdev, int qtype)
-{
-       qtype = scsi_change_queue_type(sdev, qtype);
-       if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) {
-               const char *cp;
-
-               switch (qtype) {
-               case 0:
-                       cp = "untagged";
-                       break;
-               case MSG_SIMPLE_TAG:
-                       cp = "simple tags";
-                       break;
-               case MSG_ORDERED_TAG:
-                       cp = "ordered tags";
-                       break;
-               default:
-                       cp = "unknown";
-                       break;
-               }
-               sdev_printk(KERN_INFO, sdev, "%s: to %s\n", __func__, cp);
-       }
-       return qtype;
-}
-
 static int
 check_inject(struct scsi_cmnd *scp)
 {
@@ -5212,7 +5189,6 @@ static struct scsi_host_template sdebug_driver_template = {
        .ioctl =                scsi_debug_ioctl,
        .queuecommand =         sdebug_queuecommand_lock_or_not,
        .change_queue_depth =   sdebug_change_qdepth,
-       .change_queue_type =    sdebug_change_qtype,
        .eh_abort_handler =     scsi_debug_abort,
        .eh_device_reset_handler = scsi_debug_device_reset,
        .eh_target_reset_handler = scsi_debug_target_reset,
index c1d04d4d3c6c140457c19e50865b29bd3287d54f..262ab837a7040d5586e4212b59e569aa55a2df0c 100644 (file)
@@ -211,6 +211,7 @@ static struct {
        {"Medion", "Flash XL  MMC/SD", "2.6D", BLIST_FORCELUN},
        {"MegaRAID", "LD", NULL, BLIST_FORCELUN},
        {"MICROP", "4110", NULL, BLIST_NOTQ},
+       {"MSFT", "Virtual HD", NULL, BLIST_NO_RSOC},
        {"MYLEX", "DACARMRB", "*", BLIST_REPORTLUN2},
        {"nCipher", "Fastness Crypto", NULL, BLIST_FORCELUN},
        {"NAKAMICH", "MJ-4.8S", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
index 43318d556cbcf28209f317e0c3646923324e6127..9ea95dd3e2604eea2613a5a15d074c2357fac7dd 100644 (file)
@@ -1918,7 +1918,9 @@ static int scsi_mq_prep_fn(struct request *req)
 
        if (scsi_host_get_prot(shost)) {
                cmd->prot_sdb = (void *)sg +
-                       shost->sg_tablesize * sizeof(struct scatterlist);
+                       min_t(unsigned int,
+                             shost->sg_tablesize, SCSI_MAX_SG_SEGMENTS) *
+                       sizeof(struct scatterlist);
                memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer));
 
                cmd->prot_sdb->table.sgl =
index 1cb64a8e18c91475d7f0bda12246fe7862782c03..1ac38e73df7eec896cf2835fd8588a1db5278a46 100644 (file)
@@ -738,30 +738,12 @@ store_queue_type_field(struct device *dev, struct device_attribute *attr,
                       const char *buf, size_t count)
 {
        struct scsi_device *sdev = to_scsi_device(dev);
-       struct scsi_host_template *sht = sdev->host->hostt;
-       int tag_type = 0, retval;
-       int prev_tag_type = scsi_get_tag_type(sdev);
-
-       if (!sdev->tagged_supported || !sht->change_queue_type)
-               return -EINVAL;
 
-       /*
-        * We're never issueing order tags these days, but allow the value
-        * for backwards compatibility.
-        */
-       if (strncmp(buf, "ordered", 7) == 0 ||
-           strncmp(buf, "simple", 6) == 0)
-               tag_type = MSG_SIMPLE_TAG;
-       else if (strncmp(buf, "none", 4) != 0)
+       if (!sdev->tagged_supported)
                return -EINVAL;
-
-       if (tag_type == prev_tag_type)
-               return count;
-
-       retval = sht->change_queue_type(sdev, tag_type);
-       if (retval < 0)
-               return retval;
-
+               
+       sdev_printk(KERN_INFO, sdev,
+                   "ignoring write to deprecated queue_type attribute");
        return count;
 }
 
@@ -938,10 +920,6 @@ static umode_t scsi_sdev_attr_is_visible(struct kobject *kobj,
            !sdev->host->hostt->change_queue_depth)
                return 0;
 
-       if (attr == &dev_attr_queue_type.attr &&
-           !sdev->host->hostt->change_queue_type)
-               return S_IRUGO;
-
        return attr->mode;
 }
 
index fa2aece76cc22094d36d605bc9075def9db9783b..31bbb0da3397f9a4f743b897b5f4c6e5d29dd19d 100644 (file)
@@ -1221,7 +1221,7 @@ EXPORT_SYMBOL_GPL(spi_populate_ppr_msg);
 int spi_populate_tag_msg(unsigned char *msg, struct scsi_cmnd *cmd)
 {
         if (cmd->flags & SCMD_TAGGED) {
-               *msg++ = MSG_SIMPLE_TAG;
+               *msg++ = SIMPLE_QUEUE_TAG;
                *msg++ = cmd->request->tag;
                return 2;
        }
index e3ba251fb6e75540f229af13a1af9f7f7d28dcbd..4cff0ddc2c25bd4a1ed977129fffa4923ab1f494 100644 (file)
@@ -1688,13 +1688,12 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
        if (ret == -EAGAIN) {
                /* no more space */
 
-               if (cmd_request->bounce_sgl_count) {
+               if (cmd_request->bounce_sgl_count)
                        destroy_bounce_buffer(cmd_request->bounce_sgl,
                                        cmd_request->bounce_sgl_count);
 
-                       ret = SCSI_MLQUEUE_DEVICE_BUSY;
-                       goto queue_error;
-               }
+               ret = SCSI_MLQUEUE_DEVICE_BUSY;
+               goto queue_error;
        }
 
        return 0;
index 43781c9fe521a3c6111d46fccb101894f34fe8c2..aad6683db81b9a0154d12d3fc71fd5d45fb8bfc3 100644 (file)
@@ -341,7 +341,7 @@ static int img_spfi_start_dma(struct spi_master *master,
                default:
                        rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
                        rxconf.src_addr_width = 1;
-                       rxconf.src_maxburst = 1;
+                       rxconf.src_maxburst = 4;
                }
                dmaengine_slave_config(spfi->rx_ch, &rxconf);
 
@@ -368,7 +368,7 @@ static int img_spfi_start_dma(struct spi_master *master,
                default:
                        txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
                        txconf.dst_addr_width = 1;
-                       txconf.dst_maxburst = 1;
+                       txconf.dst_maxburst = 4;
                        break;
                }
                dmaengine_slave_config(spfi->tx_ch, &txconf);
@@ -390,14 +390,14 @@ static int img_spfi_start_dma(struct spi_master *master,
                dma_async_issue_pending(spfi->rx_ch);
        }
 
+       spfi_start(spfi);
+
        if (xfer->tx_buf) {
                spfi->tx_dma_busy = true;
                dmaengine_submit(txdesc);
                dma_async_issue_pending(spfi->tx_ch);
        }
 
-       spfi_start(spfi);
-
        return 1;
 
 stop_dma:
@@ -663,7 +663,7 @@ static int img_spfi_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static int img_spfi_runtime_suspend(struct device *dev)
 {
        struct spi_master *master = dev_get_drvdata(dev);
@@ -692,7 +692,7 @@ static int img_spfi_runtime_resume(struct device *dev)
 
        return 0;
 }
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
 
 #ifdef CONFIG_PM_SLEEP
 static int img_spfi_suspend(struct device *dev)
index 0e48f8c2037d62508f6636afb9edb349bdf218c7..1bbac0378bf7bcdc00d336428cfec4440f0701ce 100644 (file)
@@ -413,7 +413,7 @@ static int meson_spifc_resume(struct device *dev)
 }
 #endif /* CONFIG_PM_SLEEP */
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static int meson_spifc_runtime_suspend(struct device *dev)
 {
        struct spi_master *master = dev_get_drvdata(dev);
@@ -431,7 +431,7 @@ static int meson_spifc_runtime_resume(struct device *dev)
 
        return clk_prepare_enable(spifc->clk);
 }
-#endif /* CONFIG_PM_RUNTIME */
+#endif /* CONFIG_PM */
 
 static const struct dev_pm_ops meson_spifc_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(meson_spifc_suspend, meson_spifc_resume)
index 239be7cbe5a83ee5e5a037bec810ee3bd24bf382..96a5fc0878d86d4fc217b30d466621176b1bc24f 100644 (file)
@@ -480,6 +480,8 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
        struct device_node      *np = spi->master->dev.of_node;
        struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
 
+       pm_runtime_get_sync(&p->pdev->dev);
+
        if (!np) {
                /*
                 * Use spi->controller_data for CS (same strategy as spi_gpio),
@@ -498,6 +500,9 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
        if (spi->cs_gpio >= 0)
                gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
 
+
+       pm_runtime_put_sync(&p->pdev->dev);
+
        return 0;
 }
 
index 8156b4c0f56876872abbea62e75b6172931f3727..3925db160650ca5d96df0880331717188a2ce23f 100644 (file)
 
 #include "lustre_patchless_compat.h"
 
-# define LOCK_FS_STRUCT(fs)    spin_lock(&(fs)->lock)
-# define UNLOCK_FS_STRUCT(fs)  spin_unlock(&(fs)->lock)
-
-static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
-                                struct dentry *dentry)
-{
-       struct path path;
-       struct path old_pwd;
-
-       path.mnt = mnt;
-       path.dentry = dentry;
-       LOCK_FS_STRUCT(fs);
-       old_pwd = fs->pwd;
-       path_get(&path);
-       fs->pwd = path;
-       UNLOCK_FS_STRUCT(fs);
-
-       if (old_pwd.dentry)
-               path_put(&old_pwd);
-}
-
-
 /*
  * set ATTR_BLOCKS to a high value to avoid any risk of collision with other
  * ATTR_* attributes (see bug 13828)
@@ -110,8 +88,6 @@ static inline void ll_set_fs_pwd(struct fs_struct *fs, struct vfsmount *mnt,
 #define cfs_bio_io_error(a, b)   bio_io_error((a))
 #define cfs_bio_endio(a, b, c)    bio_endio((a), (c))
 
-#define cfs_fs_pwd(fs)       ((fs)->pwd.dentry)
-#define cfs_fs_mnt(fs)       ((fs)->pwd.mnt)
 #define cfs_path_put(nd)     path_put(&(nd)->path)
 
 
index 407718a0026f56071a9667ebe7088f158eba013e..1ac7a702ce261e6b6fcd2ac36805d63eed3c91bb 100644 (file)
@@ -661,7 +661,7 @@ int ll_dir_setdirstripe(struct inode *dir, struct lmv_user_md *lump,
        int mode;
        int err;
 
-       mode = (0755 & (S_IRWXUGO|S_ISVTX) & ~current->fs->umask) | S_IFDIR;
+       mode = (0755 & ~current_umask()) | S_IFDIR;
        op_data = ll_prep_md_op_data(NULL, dir, NULL, filename,
                                     strlen(filename), mode, LUSTRE_OPC_MKDIR,
                                     lump);
index 6e423aa6a6e4871b1845285ec5c8f84494d3df5a..a3367bfb1456a39c9c678bc7877e1e93f4531783 100644 (file)
@@ -2372,21 +2372,6 @@ char *ll_get_fsname(struct super_block *sb, char *buf, int buflen)
        return buf;
 }
 
-static char *ll_d_path(struct dentry *dentry, char *buf, int bufsize)
-{
-       char *path = NULL;
-
-       struct path p;
-
-       p.dentry = dentry;
-       p.mnt = current->fs->root.mnt;
-       path_get(&p);
-       path = d_path(&p, buf, bufsize);
-       path_put(&p);
-
-       return path;
-}
-
 void ll_dirty_page_discard_warn(struct page *page, int ioret)
 {
        char *buf, *path = NULL;
@@ -2398,7 +2383,7 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
        if (buf != NULL) {
                dentry = d_find_alias(page->mapping->host);
                if (dentry != NULL)
-                       path = ll_d_path(dentry, buf, PAGE_SIZE);
+                       path = dentry_path_raw(dentry, buf, PAGE_SIZE);
        }
 
        CDEBUG(D_WARNING,
index 1bf891bd321aa2e0d19bb79e2e217376d31f5e09..4f361b77c749a718621c8767ec97663e43a7c87f 100644 (file)
@@ -264,7 +264,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
 
                if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
                    inode->i_sb->s_root != NULL &&
-                   is_root_inode(inode))
+                   !is_root_inode(inode))
                        ll_invalidate_aliases(inode);
 
                iput(inode);
index 73e58d22e325d8780d17470a9a9851a732669f89..55f6774f706f729b92fb2045abb7f9a33740c2b4 100644 (file)
@@ -609,6 +609,7 @@ static int __init iscsi_target_init_module(void)
 
        return ret;
 r2t_out:
+       iscsit_unregister_transport(&iscsi_target_transport);
        kmem_cache_destroy(lio_r2t_cache);
 ooo_out:
        kmem_cache_destroy(lio_ooo_cache);
@@ -943,17 +944,17 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
         */
        if ((iscsi_task_attr == ISCSI_ATTR_UNTAGGED) ||
            (iscsi_task_attr == ISCSI_ATTR_SIMPLE))
-               sam_task_attr = MSG_SIMPLE_TAG;
+               sam_task_attr = TCM_SIMPLE_TAG;
        else if (iscsi_task_attr == ISCSI_ATTR_ORDERED)
-               sam_task_attr = MSG_ORDERED_TAG;
+               sam_task_attr = TCM_ORDERED_TAG;
        else if (iscsi_task_attr == ISCSI_ATTR_HEAD_OF_QUEUE)
-               sam_task_attr = MSG_HEAD_TAG;
+               sam_task_attr = TCM_HEAD_TAG;
        else if (iscsi_task_attr == ISCSI_ATTR_ACA)
-               sam_task_attr = MSG_ACA_TAG;
+               sam_task_attr = TCM_ACA_TAG;
        else {
                pr_debug("Unknown iSCSI Task Attribute: 0x%02x, using"
-                       " MSG_SIMPLE_TAG\n", iscsi_task_attr);
-               sam_task_attr = MSG_SIMPLE_TAG;
+                       " TCM_SIMPLE_TAG\n", iscsi_task_attr);
+               sam_task_attr = TCM_SIMPLE_TAG;
        }
 
        cmd->iscsi_opcode       = ISCSI_OP_SCSI_CMD;
@@ -1811,7 +1812,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
                transport_init_se_cmd(&cmd->se_cmd,
                                      &lio_target_fabric_configfs->tf_ops,
                                      conn->sess->se_sess, 0, DMA_NONE,
-                                     MSG_SIMPLE_TAG, cmd->sense_buffer + 2);
+                                     TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
 
                target_get_sess_cmd(conn->sess->se_sess, &cmd->se_cmd, true);
                sess_ref = true;
index 302eb3b7871558bb2a1241fedc521e20efebf415..09a522bae222d190ec92e157a42f13d2e361da4a 100644 (file)
@@ -790,7 +790,6 @@ struct iscsi_np {
        void                    *np_context;
        struct iscsit_transport *np_transport;
        struct list_head        np_list;
-       struct iscsi_tpg_np     *tpg_np;
 } ____cacheline_aligned;
 
 struct iscsi_tpg_np {
index 480f2e0ecc1170884c5f56ca38644fab1465a0de..713c0c1877ab8d16bb999ab6738b058e11c82ba9 100644 (file)
@@ -281,7 +281,6 @@ static int iscsi_login_zero_tsih_s1(
 {
        struct iscsi_session *sess = NULL;
        struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf;
-       enum target_prot_op sup_pro_ops;
        int ret;
 
        sess = kzalloc(sizeof(struct iscsi_session), GFP_KERNEL);
@@ -343,9 +342,8 @@ static int iscsi_login_zero_tsih_s1(
                kfree(sess);
                return -ENOMEM;
        }
-       sup_pro_ops = conn->conn_transport->iscsit_get_sup_prot_ops(conn);
 
-       sess->se_sess = transport_init_session(sup_pro_ops);
+       sess->se_sess = transport_init_session(TARGET_PROT_NORMAL);
        if (IS_ERR(sess->se_sess)) {
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
@@ -1161,6 +1159,7 @@ void iscsi_target_login_sess_out(struct iscsi_conn *conn,
        }
        kfree(conn->sess->sess_ops);
        kfree(conn->sess);
+       conn->sess = NULL;
 
 old_sess_out:
        iscsi_stop_login_thread_timer(np);
@@ -1204,6 +1203,9 @@ old_sess_out:
                conn->sock = NULL;
        }
 
+       if (conn->conn_transport->iscsit_wait_conn)
+               conn->conn_transport->iscsit_wait_conn(conn);
+
        if (conn->conn_transport->iscsit_free_conn)
                conn->conn_transport->iscsit_free_conn(conn);
 
@@ -1364,6 +1366,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
        }
        login->zero_tsih = zero_tsih;
 
+       conn->sess->se_sess->sup_prot_ops =
+               conn->conn_transport->iscsit_get_sup_prot_ops(conn);
+
        tpg = conn->tpg;
        if (!tpg) {
                pr_err("Unable to locate struct iscsi_conn->tpg\n");
index c3cb5c15efdaa4fe1e5ea2c4a90cd6eb0e167a15..9053a3c0c6e51675faf45bfbbfd990d48752f67d 100644 (file)
@@ -501,7 +501,6 @@ struct iscsi_tpg_np *iscsit_tpg_add_network_portal(
        init_completion(&tpg_np->tpg_np_comp);
        kref_init(&tpg_np->tpg_np_kref);
        tpg_np->tpg_np          = np;
-       np->tpg_np              = tpg_np;
        tpg_np->tpg             = tpg;
 
        spin_lock(&tpg->tpg_np_lock);
index 882728fac30c4e8a8fcf9db5e26f826ec86d2f0d..08217d62fb0d6860e40bcb9fa4b2947e710b3bd4 100644 (file)
@@ -26,8 +26,7 @@ struct iscsit_transport *iscsit_get_transport(int type)
 
 void iscsit_put_transport(struct iscsit_transport *t)
 {
-       if (t->owner)
-               module_put(t->owner);
+       module_put(t->owner);
 }
 
 int iscsit_register_transport(struct iscsit_transport *t)
index 7c6a95bcb35e4ec043679338211f2435f98ccd33..bcd88ec99793ba554496369d8853ee391cef8c11 100644 (file)
@@ -1356,15 +1356,15 @@ static int iscsit_do_tx_data(
        struct iscsi_conn *conn,
        struct iscsi_data_count *count)
 {
-       int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len;
+       int ret, iov_len;
        struct kvec *iov_p;
        struct msghdr msg;
 
        if (!conn || !conn->sock || !conn->conn_ops)
                return -1;
 
-       if (data <= 0) {
-               pr_err("Data length is: %d\n", data);
+       if (count->data_length <= 0) {
+               pr_err("Data length is: %d\n", count->data_length);
                return -1;
        }
 
@@ -1373,20 +1373,16 @@ static int iscsit_do_tx_data(
        iov_p = count->iov;
        iov_len = count->iov_count;
 
-       while (total_tx < data) {
-               tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
-                                       (data - total_tx));
-               if (tx_loop <= 0) {
-                       pr_debug("tx_loop: %d total_tx %d\n",
-                               tx_loop, total_tx);
-                       return tx_loop;
-               }
-               total_tx += tx_loop;
-               pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
-                                       tx_loop, total_tx, data);
+       ret = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len,
+                            count->data_length);
+       if (ret != count->data_length) {
+               pr_err("Unexpected ret: %d send data %d\n",
+                      ret, count->data_length);
+               return -EPIPE;
        }
+       pr_debug("ret: %d, sent data: %d\n", ret, count->data_length);
 
-       return total_tx;
+       return ret;
 }
 
 int rx_data(
index 4d1b7224a7f2559d200ec11aaced60a8d67c4a6c..6b3c329546895ab1095beb5c048ef24c6e1cea19 100644 (file)
@@ -138,7 +138,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
                set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
                goto out_done;
        }
-       tl_nexus = tl_hba->tl_nexus;
+       tl_nexus = tl_tpg->tl_nexus;
        if (!tl_nexus) {
                scmd_printk(KERN_ERR, sc, "TCM_Loop I_T Nexus"
                                " does not exist\n");
@@ -168,7 +168,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
 
        rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
                        &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
-                       transfer_length, MSG_SIMPLE_TAG,
+                       transfer_length, TCM_SIMPLE_TAG,
                        sc->sc_data_direction, 0,
                        scsi_sglist(sc), scsi_sg_count(sc),
                        sgl_bidi, sgl_bidi_count,
@@ -218,16 +218,26 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
  * to struct scsi_device
  */
 static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
-                             struct tcm_loop_nexus *tl_nexus,
                              int lun, int task, enum tcm_tmreq_table tmr)
 {
        struct se_cmd *se_cmd = NULL;
        struct se_session *se_sess;
        struct se_portal_group *se_tpg;
+       struct tcm_loop_nexus *tl_nexus;
        struct tcm_loop_cmd *tl_cmd = NULL;
        struct tcm_loop_tmr *tl_tmr = NULL;
        int ret = TMR_FUNCTION_FAILED, rc;
 
+       /*
+        * Locate the tl_nexus and se_sess pointers
+        */
+       tl_nexus = tl_tpg->tl_nexus;
+       if (!tl_nexus) {
+               pr_err("Unable to perform device reset without"
+                               " active I_T Nexus\n");
+               return ret;
+       }
+
        tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
        if (!tl_cmd) {
                pr_err("Unable to allocate memory for tl_cmd\n");
@@ -243,12 +253,12 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg,
 
        se_cmd = &tl_cmd->tl_se_cmd;
        se_tpg = &tl_tpg->tl_se_tpg;
-       se_sess = tl_nexus->se_sess;
+       se_sess = tl_tpg->tl_nexus->se_sess;
        /*
         * Initialize struct se_cmd descriptor from target_core_mod infrastructure
         */
        transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0,
-                               DMA_NONE, MSG_SIMPLE_TAG,
+                               DMA_NONE, TCM_SIMPLE_TAG,
                                &tl_cmd->tl_sense_buf[0]);
 
        rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL);
@@ -288,7 +298,6 @@ release:
 static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 {
        struct tcm_loop_hba *tl_hba;
-       struct tcm_loop_nexus *tl_nexus;
        struct tcm_loop_tpg *tl_tpg;
        int ret = FAILED;
 
@@ -296,21 +305,8 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
         * Locate the tcm_loop_hba_t pointer
         */
        tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
-       /*
-        * Locate the tl_nexus and se_sess pointers
-        */
-       tl_nexus = tl_hba->tl_nexus;
-       if (!tl_nexus) {
-               pr_err("Unable to perform device reset without"
-                               " active I_T Nexus\n");
-               return FAILED;
-       }
-
-       /*
-        * Locate the tl_tpg pointer from TargetID in sc->device->id
-        */
        tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
-       ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
+       ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
                                 sc->request->tag, TMR_ABORT_TASK);
        return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 }
@@ -322,7 +318,6 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
 static int tcm_loop_device_reset(struct scsi_cmnd *sc)
 {
        struct tcm_loop_hba *tl_hba;
-       struct tcm_loop_nexus *tl_nexus;
        struct tcm_loop_tpg *tl_tpg;
        int ret = FAILED;
 
@@ -330,20 +325,9 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)
         * Locate the tcm_loop_hba_t pointer
         */
        tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);
-       /*
-        * Locate the tl_nexus and se_sess pointers
-        */
-       tl_nexus = tl_hba->tl_nexus;
-       if (!tl_nexus) {
-               pr_err("Unable to perform device reset without"
-                               " active I_T Nexus\n");
-               return FAILED;
-       }
-       /*
-        * Locate the tl_tpg pointer from TargetID in sc->device->id
-        */
        tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
-       ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
+
+       ret = tcm_loop_issue_tmr(tl_tpg, sc->device->lun,
                                 0, TMR_LUN_RESET);
        return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
 }
@@ -385,7 +369,6 @@ static struct scsi_host_template tcm_loop_driver_template = {
        .name                   = "TCM_Loopback",
        .queuecommand           = tcm_loop_queuecommand,
        .change_queue_depth     = scsi_change_queue_depth,
-       .change_queue_type      = scsi_change_queue_type,
        .eh_abort_handler = tcm_loop_abort_task,
        .eh_device_reset_handler = tcm_loop_device_reset,
        .eh_target_reset_handler = tcm_loop_target_reset,
@@ -940,8 +923,8 @@ static int tcm_loop_make_nexus(
        struct tcm_loop_nexus *tl_nexus;
        int ret = -ENOMEM;
 
-       if (tl_tpg->tl_hba->tl_nexus) {
-               pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
+       if (tl_tpg->tl_nexus) {
+               pr_debug("tl_tpg->tl_nexus already exists\n");
                return -EEXIST;
        }
        se_tpg = &tl_tpg->tl_se_tpg;
@@ -976,7 +959,7 @@ static int tcm_loop_make_nexus(
         */
        __transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
                        tl_nexus->se_sess, tl_nexus);
-       tl_tpg->tl_hba->tl_nexus = tl_nexus;
+       tl_tpg->tl_nexus = tl_nexus;
        pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
                " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
                name);
@@ -992,12 +975,8 @@ static int tcm_loop_drop_nexus(
 {
        struct se_session *se_sess;
        struct tcm_loop_nexus *tl_nexus;
-       struct tcm_loop_hba *tl_hba = tpg->tl_hba;
 
-       if (!tl_hba)
-               return -ENODEV;
-
-       tl_nexus = tl_hba->tl_nexus;
+       tl_nexus = tpg->tl_nexus;
        if (!tl_nexus)
                return -ENODEV;
 
@@ -1013,13 +992,13 @@ static int tcm_loop_drop_nexus(
        }
 
        pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
-               " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
+               " %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tpg->tl_hba),
                tl_nexus->se_sess->se_node_acl->initiatorname);
        /*
         * Release the SCSI I_T Nexus to the emulated SAS Target Port
         */
        transport_deregister_session(tl_nexus->se_sess);
-       tpg->tl_hba->tl_nexus = NULL;
+       tpg->tl_nexus = NULL;
        kfree(tl_nexus);
        return 0;
 }
@@ -1035,7 +1014,7 @@ static ssize_t tcm_loop_tpg_show_nexus(
        struct tcm_loop_nexus *tl_nexus;
        ssize_t ret;
 
-       tl_nexus = tl_tpg->tl_hba->tl_nexus;
+       tl_nexus = tl_tpg->tl_nexus;
        if (!tl_nexus)
                return -ENODEV;
 
index 54c59d0b6608f88bcf3f58beda8e6cc92d646b99..6ae49f272ba6f72d1cd425d30c66227d5d696f31 100644 (file)
@@ -27,11 +27,6 @@ struct tcm_loop_tmr {
 };
 
 struct tcm_loop_nexus {
-       int it_nexus_active;
-       /*
-        * Pointer to Linux/SCSI HBA from linux/include/scsi_host.h
-        */
-       struct scsi_host *sh;
        /*
         * Pointer to TCM session for I_T Nexus
         */
@@ -51,6 +46,7 @@ struct tcm_loop_tpg {
        atomic_t tl_tpg_port_count;
        struct se_portal_group tl_se_tpg;
        struct tcm_loop_hba *tl_hba;
+       struct tcm_loop_nexus *tl_nexus;
 };
 
 struct tcm_loop_hba {
@@ -59,7 +55,6 @@ struct tcm_loop_hba {
        struct se_hba_s *se_hba;
        struct se_lun *tl_hba_lun;
        struct se_port *tl_hba_lun_sep;
-       struct tcm_loop_nexus *tl_nexus;
        struct device dev;
        struct Scsi_Host *sh;
        struct tcm_loop_tpg tl_hba_tpgs[TL_TPGS_PER_HBA];
index e7e93727553cef8d3503201f381bd64b3f02d6b3..9512af6a811408f169b7a6c5084dda5ce98d8dfa 100644 (file)
@@ -1237,7 +1237,7 @@ static void sbp_handle_command(struct sbp_target_request *req)
 
        if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
                              req->sense_buf, unpacked_lun, data_length,
-                             MSG_SIMPLE_TAG, data_dir, 0))
+                             TCM_SIMPLE_TAG, data_dir, 0))
                goto err;
 
        return;
index 79f9296a08ae7195d859c5ba200b2b990f78c0de..75d89adfccc025f7fa862c5c7aba157113b001ea 100644 (file)
 #include "target_core_rd.h"
 #include "target_core_xcopy.h"
 
+#define TB_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)             \
+static void target_core_setup_##_name##_cit(struct se_subsystem_api *sa) \
+{                                                                      \
+       struct target_backend_cits *tbc = &sa->tb_cits;                 \
+       struct config_item_type *cit = &tbc->tb_##_name##_cit;          \
+                                                                       \
+       cit->ct_item_ops = _item_ops;                                   \
+       cit->ct_group_ops = _group_ops;                                 \
+       cit->ct_attrs = _attrs;                                         \
+       cit->ct_owner = sa->owner;                                      \
+       pr_debug("Setup generic %s\n", __stringify(_name));             \
+}
+
 extern struct t10_alua_lu_gp *default_lu_gp;
 
 static LIST_HEAD(g_tf_list);
@@ -126,48 +139,57 @@ static struct config_group *target_core_register_fabric(
 
        pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
                        " %s\n", group, name);
-       /*
-        * Below are some hardcoded request_module() calls to automatically
-        * local fabric modules when the following is called:
-        *
-        * mkdir -p /sys/kernel/config/target/$MODULE_NAME
-        *
-        * Note that this does not limit which TCM fabric module can be
-        * registered, but simply provids auto loading logic for modules with
-        * mkdir(2) system calls with known TCM fabric modules.
-        */
-       if (!strncmp(name, "iscsi", 5)) {
+
+       tf = target_core_get_fabric(name);
+       if (!tf) {
+               pr_err("target_core_register_fabric() trying autoload for %s\n",
+                       name);
+
                /*
-                * Automatically load the LIO Target fabric module when the
-                * following is called:
+                * Below are some hardcoded request_module() calls to automatically
+                * local fabric modules when the following is called:
                 *
-                * mkdir -p $CONFIGFS/target/iscsi
-                */
-               ret = request_module("iscsi_target_mod");
-               if (ret < 0) {
-                       pr_err("request_module() failed for"
-                               " iscsi_target_mod.ko: %d\n", ret);
-                       return ERR_PTR(-EINVAL);
-               }
-       } else if (!strncmp(name, "loopback", 8)) {
-               /*
-                * Automatically load the tcm_loop fabric module when the
-                * following is called:
+                * mkdir -p /sys/kernel/config/target/$MODULE_NAME
                 *
-                * mkdir -p $CONFIGFS/target/loopback
+                * Note that this does not limit which TCM fabric module can be
+                * registered, but simply provids auto loading logic for modules with
+                * mkdir(2) system calls with known TCM fabric modules.
                 */
-               ret = request_module("tcm_loop");
-               if (ret < 0) {
-                       pr_err("request_module() failed for"
-                               " tcm_loop.ko: %d\n", ret);
-                       return ERR_PTR(-EINVAL);
+
+               if (!strncmp(name, "iscsi", 5)) {
+                       /*
+                        * Automatically load the LIO Target fabric module when the
+                        * following is called:
+                        *
+                        * mkdir -p $CONFIGFS/target/iscsi
+                        */
+                       ret = request_module("iscsi_target_mod");
+                       if (ret < 0) {
+                               pr_err("request_module() failed for"
+                                      " iscsi_target_mod.ko: %d\n", ret);
+                               return ERR_PTR(-EINVAL);
+                       }
+               } else if (!strncmp(name, "loopback", 8)) {
+                       /*
+                        * Automatically load the tcm_loop fabric module when the
+                        * following is called:
+                        *
+                        * mkdir -p $CONFIGFS/target/loopback
+                        */
+                       ret = request_module("tcm_loop");
+                       if (ret < 0) {
+                               pr_err("request_module() failed for"
+                                      " tcm_loop.ko: %d\n", ret);
+                               return ERR_PTR(-EINVAL);
+                       }
                }
+
+               tf = target_core_get_fabric(name);
        }
 
-       tf = target_core_get_fabric(name);
        if (!tf) {
                pr_err("target_core_get_fabric() failed for %s\n",
-                       name);
+                      name);
                return ERR_PTR(-EINVAL);
        }
        pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
@@ -562,198 +584,21 @@ EXPORT_SYMBOL(target_fabric_configfs_deregister);
 // Stop functions called by external Target Fabrics Modules
 //############################################################################*/
 
-/* Start functions for struct config_item_type target_core_dev_attrib_cit */
-
-#define DEF_DEV_ATTRIB_SHOW(_name)                                     \
-static ssize_t target_core_dev_show_attr_##_name(                      \
-       struct se_dev_attrib *da,                                       \
-       char *page)                                                     \
-{                                                                      \
-       return snprintf(page, PAGE_SIZE, "%u\n",                        \
-               (u32)da->da_dev->dev_attrib._name);                     \
-}
-
-#define DEF_DEV_ATTRIB_STORE(_name)                                    \
-static ssize_t target_core_dev_store_attr_##_name(                     \
-       struct se_dev_attrib *da,                                       \
-       const char *page,                                               \
-       size_t count)                                                   \
-{                                                                      \
-       unsigned long val;                                              \
-       int ret;                                                        \
-                                                                       \
-       ret = kstrtoul(page, 0, &val);                          \
-       if (ret < 0) {                                                  \
-               pr_err("kstrtoul() failed with"         \
-                       " ret: %d\n", ret);                             \
-               return -EINVAL;                                         \
-       }                                                               \
-       ret = se_dev_set_##_name(da->da_dev, (u32)val);                 \
-                                                                       \
-       return (!ret) ? count : -EINVAL;                                \
-}
-
-#define DEF_DEV_ATTRIB(_name)                                          \
-DEF_DEV_ATTRIB_SHOW(_name);                                            \
-DEF_DEV_ATTRIB_STORE(_name);
-
-#define DEF_DEV_ATTRIB_RO(_name)                                       \
-DEF_DEV_ATTRIB_SHOW(_name);
+/* Start functions for struct config_item_type tb_dev_attrib_cit */
 
 CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
-#define SE_DEV_ATTR(_name, _mode)                                      \
-static struct target_core_dev_attrib_attribute                         \
-                       target_core_dev_attrib_##_name =                \
-               __CONFIGFS_EATTR(_name, _mode,                          \
-               target_core_dev_show_attr_##_name,                      \
-               target_core_dev_store_attr_##_name);
-
-#define SE_DEV_ATTR_RO(_name);                                         \
-static struct target_core_dev_attrib_attribute                         \
-                       target_core_dev_attrib_##_name =                \
-       __CONFIGFS_EATTR_RO(_name,                                      \
-       target_core_dev_show_attr_##_name);
-
-DEF_DEV_ATTRIB(emulate_model_alias);
-SE_DEV_ATTR(emulate_model_alias, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_dpo);
-SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_fua_write);
-SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_fua_read);
-SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_write_cache);
-SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
-SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_tas);
-SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_tpu);
-SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_tpws);
-SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_caw);
-SE_DEV_ATTR(emulate_caw, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_3pc);
-SE_DEV_ATTR(emulate_3pc, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(pi_prot_type);
-SE_DEV_ATTR(pi_prot_type, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB_RO(hw_pi_prot_type);
-SE_DEV_ATTR_RO(hw_pi_prot_type);
-
-DEF_DEV_ATTRIB(pi_prot_format);
-SE_DEV_ATTR(pi_prot_format, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(enforce_pr_isids);
-SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(is_nonrot);
-SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(emulate_rest_reord);
-SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(force_pr_aptpl);
-SE_DEV_ATTR(force_pr_aptpl, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB_RO(hw_block_size);
-SE_DEV_ATTR_RO(hw_block_size);
-
-DEF_DEV_ATTRIB(block_size);
-SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB_RO(hw_max_sectors);
-SE_DEV_ATTR_RO(hw_max_sectors);
-
-DEF_DEV_ATTRIB(fabric_max_sectors);
-SE_DEV_ATTR(fabric_max_sectors, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(optimal_sectors);
-SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB_RO(hw_queue_depth);
-SE_DEV_ATTR_RO(hw_queue_depth);
-
-DEF_DEV_ATTRIB(queue_depth);
-SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(max_unmap_lba_count);
-SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(max_unmap_block_desc_count);
-SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(unmap_granularity);
-SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(unmap_granularity_alignment);
-SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
-
-DEF_DEV_ATTRIB(max_write_same_len);
-SE_DEV_ATTR(max_write_same_len, S_IRUGO | S_IWUSR);
-
 CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
 
-static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
-       &target_core_dev_attrib_emulate_model_alias.attr,
-       &target_core_dev_attrib_emulate_dpo.attr,
-       &target_core_dev_attrib_emulate_fua_write.attr,
-       &target_core_dev_attrib_emulate_fua_read.attr,
-       &target_core_dev_attrib_emulate_write_cache.attr,
-       &target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
-       &target_core_dev_attrib_emulate_tas.attr,
-       &target_core_dev_attrib_emulate_tpu.attr,
-       &target_core_dev_attrib_emulate_tpws.attr,
-       &target_core_dev_attrib_emulate_caw.attr,
-       &target_core_dev_attrib_emulate_3pc.attr,
-       &target_core_dev_attrib_pi_prot_type.attr,
-       &target_core_dev_attrib_hw_pi_prot_type.attr,
-       &target_core_dev_attrib_pi_prot_format.attr,
-       &target_core_dev_attrib_enforce_pr_isids.attr,
-       &target_core_dev_attrib_force_pr_aptpl.attr,
-       &target_core_dev_attrib_is_nonrot.attr,
-       &target_core_dev_attrib_emulate_rest_reord.attr,
-       &target_core_dev_attrib_hw_block_size.attr,
-       &target_core_dev_attrib_block_size.attr,
-       &target_core_dev_attrib_hw_max_sectors.attr,
-       &target_core_dev_attrib_fabric_max_sectors.attr,
-       &target_core_dev_attrib_optimal_sectors.attr,
-       &target_core_dev_attrib_hw_queue_depth.attr,
-       &target_core_dev_attrib_queue_depth.attr,
-       &target_core_dev_attrib_max_unmap_lba_count.attr,
-       &target_core_dev_attrib_max_unmap_block_desc_count.attr,
-       &target_core_dev_attrib_unmap_granularity.attr,
-       &target_core_dev_attrib_unmap_granularity_alignment.attr,
-       &target_core_dev_attrib_max_write_same_len.attr,
-       NULL,
-};
-
 static struct configfs_item_operations target_core_dev_attrib_ops = {
        .show_attribute         = target_core_dev_attrib_attr_show,
        .store_attribute        = target_core_dev_attrib_attr_store,
 };
 
-static struct config_item_type target_core_dev_attrib_cit = {
-       .ct_item_ops            = &target_core_dev_attrib_ops,
-       .ct_attrs               = target_core_dev_attrib_attrs,
-       .ct_owner               = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_attrib, &target_core_dev_attrib_ops, NULL, NULL);
 
-/* End functions for struct config_item_type target_core_dev_attrib_cit */
+/* End functions for struct config_item_type tb_dev_attrib_cit */
 
-/*  Start functions for struct config_item_type target_core_dev_wwn_cit */
+/*  Start functions for struct config_item_type tb_dev_wwn_cit */
 
 CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
 #define SE_DEV_WWN_ATTR(_name, _mode)                                  \
@@ -984,15 +829,11 @@ static struct configfs_item_operations target_core_dev_wwn_ops = {
        .store_attribute        = target_core_dev_wwn_attr_store,
 };
 
-static struct config_item_type target_core_dev_wwn_cit = {
-       .ct_item_ops            = &target_core_dev_wwn_ops,
-       .ct_attrs               = target_core_dev_wwn_attrs,
-       .ct_owner               = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_wwn, &target_core_dev_wwn_ops, NULL, target_core_dev_wwn_attrs);
 
-/*  End functions for struct config_item_type target_core_dev_wwn_cit */
+/*  End functions for struct config_item_type tb_dev_wwn_cit */
 
-/*  Start functions for struct config_item_type target_core_dev_pr_cit */
+/*  Start functions for struct config_item_type tb_dev_pr_cit */
 
 CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_device);
 #define SE_DEV_PR_ATTR(_name, _mode)                                   \
@@ -1453,15 +1294,11 @@ static struct configfs_item_operations target_core_dev_pr_ops = {
        .store_attribute        = target_core_dev_pr_attr_store,
 };
 
-static struct config_item_type target_core_dev_pr_cit = {
-       .ct_item_ops            = &target_core_dev_pr_ops,
-       .ct_attrs               = target_core_dev_pr_attrs,
-       .ct_owner               = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_pr, &target_core_dev_pr_ops, NULL, target_core_dev_pr_attrs);
 
-/*  End functions for struct config_item_type target_core_dev_pr_cit */
+/*  End functions for struct config_item_type tb_dev_pr_cit */
 
-/*  Start functions for struct config_item_type target_core_dev_cit */
+/*  Start functions for struct config_item_type tb_dev_cit */
 
 static ssize_t target_core_show_dev_info(void *p, char *page)
 {
@@ -1925,7 +1762,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_lba_map = {
        .store  = target_core_store_dev_lba_map,
 };
 
-static struct configfs_attribute *lio_core_dev_attrs[] = {
+static struct configfs_attribute *target_core_dev_attrs[] = {
        &target_core_attr_dev_info.attr,
        &target_core_attr_dev_control.attr,
        &target_core_attr_dev_alias.attr,
@@ -1984,13 +1821,9 @@ static struct configfs_item_operations target_core_dev_item_ops = {
        .store_attribute        = target_core_dev_store,
 };
 
-static struct config_item_type target_core_dev_cit = {
-       .ct_item_ops            = &target_core_dev_item_ops,
-       .ct_attrs               = lio_core_dev_attrs,
-       .ct_owner               = THIS_MODULE,
-};
+TB_CIT_SETUP(dev, &target_core_dev_item_ops, NULL, target_core_dev_attrs);
 
-/* End functions for struct config_item_type target_core_dev_cit */
+/* End functions for struct config_item_type tb_dev_cit */
 
 /* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
 
@@ -2670,7 +2503,7 @@ static struct config_item_type target_core_alua_tg_pt_gp_cit = {
 
 /* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
 
-/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+/* Start functions for struct config_item_type tb_alua_tg_pt_gps_cit */
 
 static struct config_group *target_core_alua_create_tg_pt_gp(
        struct config_group *group,
@@ -2721,12 +2554,9 @@ static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
        .drop_item              = &target_core_alua_drop_tg_pt_gp,
 };
 
-static struct config_item_type target_core_alua_tg_pt_gps_cit = {
-       .ct_group_ops           = &target_core_alua_tg_pt_gps_group_ops,
-       .ct_owner               = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_alua_tg_pt_gps, NULL, &target_core_alua_tg_pt_gps_group_ops, NULL);
 
-/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
+/* End functions for struct config_item_type tb_alua_tg_pt_gps_cit */
 
 /* Start functions for struct config_item_type target_core_alua_cit */
 
@@ -2744,7 +2574,7 @@ static struct config_item_type target_core_alua_cit = {
 
 /* End functions for struct config_item_type target_core_alua_cit */
 
-/* Start functions for struct config_item_type target_core_stat_cit */
+/* Start functions for struct config_item_type tb_dev_stat_cit */
 
 static struct config_group *target_core_stat_mkdir(
        struct config_group *group,
@@ -2765,12 +2595,9 @@ static struct configfs_group_operations target_core_stat_group_ops = {
        .drop_item              = &target_core_stat_rmdir,
 };
 
-static struct config_item_type target_core_stat_cit = {
-       .ct_group_ops           = &target_core_stat_group_ops,
-       .ct_owner               = THIS_MODULE,
-};
+TB_CIT_SETUP(dev_stat, NULL, &target_core_stat_group_ops, NULL);
 
-/* End functions for struct config_item_type target_core_stat_cit */
+/* End functions for struct config_item_type tb_dev_stat_cit */
 
 /* Start functions for struct config_item_type target_core_hba_cit */
 
@@ -2806,17 +2633,17 @@ static struct config_group *target_core_make_subdev(
        if (!dev_cg->default_groups)
                goto out_free_device;
 
-       config_group_init_type_name(dev_cg, name, &target_core_dev_cit);
+       config_group_init_type_name(dev_cg, name, &t->tb_cits.tb_dev_cit);
        config_group_init_type_name(&dev->dev_attrib.da_group, "attrib",
-                       &target_core_dev_attrib_cit);
+                       &t->tb_cits.tb_dev_attrib_cit);
        config_group_init_type_name(&dev->dev_pr_group, "pr",
-                       &target_core_dev_pr_cit);
+                       &t->tb_cits.tb_dev_pr_cit);
        config_group_init_type_name(&dev->t10_wwn.t10_wwn_group, "wwn",
-                       &target_core_dev_wwn_cit);
+                       &t->tb_cits.tb_dev_wwn_cit);
        config_group_init_type_name(&dev->t10_alua.alua_tg_pt_gps_group,
-                       "alua", &target_core_alua_tg_pt_gps_cit);
+                       "alua", &t->tb_cits.tb_dev_alua_tg_pt_gps_cit);
        config_group_init_type_name(&dev->dev_stat_grps.stat_group,
-                       "statistics", &target_core_stat_cit);
+                       "statistics", &t->tb_cits.tb_dev_stat_cit);
 
        dev_cg->default_groups[0] = &dev->dev_attrib.da_group;
        dev_cg->default_groups[1] = &dev->dev_pr_group;
@@ -3110,6 +2937,17 @@ static struct config_item_type target_core_cit = {
 
 /* Stop functions for struct config_item_type target_core_hba_cit */
 
+void target_core_setup_sub_cits(struct se_subsystem_api *sa)
+{
+       target_core_setup_dev_cit(sa);
+       target_core_setup_dev_attrib_cit(sa);
+       target_core_setup_dev_pr_cit(sa);
+       target_core_setup_dev_wwn_cit(sa);
+       target_core_setup_dev_alua_tg_pt_gps_cit(sa);
+       target_core_setup_dev_stat_cit(sa);
+}
+EXPORT_SYMBOL(target_core_setup_sub_cits);
+
 static int __init target_core_init_configfs(void)
 {
        struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
index c45f9e907e44c168de68060b83737e302533f785..7653cfb027a200cbec0dd51c95047708837c7227 100644 (file)
@@ -659,6 +659,7 @@ int se_dev_set_max_unmap_lba_count(
                        dev, dev->dev_attrib.max_unmap_lba_count);
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_max_unmap_lba_count);
 
 int se_dev_set_max_unmap_block_desc_count(
        struct se_device *dev,
@@ -670,6 +671,7 @@ int se_dev_set_max_unmap_block_desc_count(
                        dev, dev->dev_attrib.max_unmap_block_desc_count);
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_max_unmap_block_desc_count);
 
 int se_dev_set_unmap_granularity(
        struct se_device *dev,
@@ -680,6 +682,7 @@ int se_dev_set_unmap_granularity(
                        dev, dev->dev_attrib.unmap_granularity);
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_unmap_granularity);
 
 int se_dev_set_unmap_granularity_alignment(
        struct se_device *dev,
@@ -690,6 +693,7 @@ int se_dev_set_unmap_granularity_alignment(
                        dev, dev->dev_attrib.unmap_granularity_alignment);
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_unmap_granularity_alignment);
 
 int se_dev_set_max_write_same_len(
        struct se_device *dev,
@@ -700,6 +704,7 @@ int se_dev_set_max_write_same_len(
                        dev, dev->dev_attrib.max_write_same_len);
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_max_write_same_len);
 
 static void dev_set_t10_wwn_model_alias(struct se_device *dev)
 {
@@ -738,6 +743,7 @@ int se_dev_set_emulate_model_alias(struct se_device *dev, int flag)
 
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_emulate_model_alias);
 
 int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
 {
@@ -753,6 +759,7 @@ int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
 
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_emulate_dpo);
 
 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
 {
@@ -760,17 +767,12 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
-
-       if (flag &&
-           dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-               pr_err("emulate_fua_write not supported for pSCSI\n");
-               return -EINVAL;
-       }
        dev->dev_attrib.emulate_fua_write = flag;
        pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
                        dev, dev->dev_attrib.emulate_fua_write);
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_emulate_fua_write);
 
 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
 {
@@ -786,6 +788,7 @@ int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
 
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_emulate_fua_read);
 
 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
 {
@@ -793,11 +796,6 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
-       if (flag &&
-           dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-               pr_err("emulate_write_cache not supported for pSCSI\n");
-               return -EINVAL;
-       }
        if (flag &&
            dev->transport->get_write_cache) {
                pr_err("emulate_write_cache not supported for this device\n");
@@ -809,6 +807,7 @@ int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
                        dev, dev->dev_attrib.emulate_write_cache);
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_emulate_write_cache);
 
 int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
 {
@@ -829,6 +828,7 @@ int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
 
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_emulate_ua_intlck_ctrl);
 
 int se_dev_set_emulate_tas(struct se_device *dev, int flag)
 {
@@ -849,6 +849,7 @@ int se_dev_set_emulate_tas(struct se_device *dev, int flag)
 
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_emulate_tas);
 
 int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
 {
@@ -870,6 +871,7 @@ int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
                                dev, flag);
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_emulate_tpu);
 
 int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
 {
@@ -891,6 +893,7 @@ int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
                                dev, flag);
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_emulate_tpws);
 
 int se_dev_set_emulate_caw(struct se_device *dev, int flag)
 {
@@ -904,6 +907,7 @@ int se_dev_set_emulate_caw(struct se_device *dev, int flag)
 
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_emulate_caw);
 
 int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
 {
@@ -917,6 +921,7 @@ int se_dev_set_emulate_3pc(struct se_device *dev, int flag)
 
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_emulate_3pc);
 
 int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
 {
@@ -970,6 +975,7 @@ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
 
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_pi_prot_type);
 
 int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
 {
@@ -1005,6 +1011,7 @@ int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
 
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_pi_prot_format);
 
 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
 {
@@ -1017,6 +1024,7 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
                (dev->dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_enforce_pr_isids);
 
 int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
 {
@@ -1034,6 +1042,7 @@ int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
        pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_force_pr_aptpl);
 
 int se_dev_set_is_nonrot(struct se_device *dev, int flag)
 {
@@ -1046,6 +1055,7 @@ int se_dev_set_is_nonrot(struct se_device *dev, int flag)
               dev, flag);
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_is_nonrot);
 
 int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
 {
@@ -1058,6 +1068,7 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
        pr_debug("dev[%p]: SE Device emulate_rest_reord: %d\n", dev, flag);
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_emulate_rest_reord);
 
 /*
  * Note, this can only be called on unexported SE Device Object.
@@ -1076,31 +1087,21 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
                return -EINVAL;
        }
 
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
+       if (queue_depth > dev->dev_attrib.queue_depth) {
                if (queue_depth > dev->dev_attrib.hw_queue_depth) {
-                       pr_err("dev[%p]: Passed queue_depth: %u"
-                               " exceeds TCM/SE_Device TCQ: %u\n",
-                               dev, queue_depth,
+                       pr_err("dev[%p]: Passed queue_depth:"
+                               " %u exceeds TCM/SE_Device MAX"
+                               " TCQ: %u\n", dev, queue_depth,
                                dev->dev_attrib.hw_queue_depth);
                        return -EINVAL;
                }
-       } else {
-               if (queue_depth > dev->dev_attrib.queue_depth) {
-                       if (queue_depth > dev->dev_attrib.hw_queue_depth) {
-                               pr_err("dev[%p]: Passed queue_depth:"
-                                       " %u exceeds TCM/SE_Device MAX"
-                                       " TCQ: %u\n", dev, queue_depth,
-                                       dev->dev_attrib.hw_queue_depth);
-                               return -EINVAL;
-                       }
-               }
        }
-
        dev->dev_attrib.queue_depth = dev->queue_depth = queue_depth;
        pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
                        dev, queue_depth);
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_queue_depth);
 
 int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
 {
@@ -1123,22 +1124,12 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
                                DA_STATUS_MAX_SECTORS_MIN);
                return -EINVAL;
        }
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-               if (fabric_max_sectors > dev->dev_attrib.hw_max_sectors) {
-                       pr_err("dev[%p]: Passed fabric_max_sectors: %u"
-                               " greater than TCM/SE_Device max_sectors:"
-                               " %u\n", dev, fabric_max_sectors,
-                               dev->dev_attrib.hw_max_sectors);
-                        return -EINVAL;
-               }
-       } else {
-               if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
-                       pr_err("dev[%p]: Passed fabric_max_sectors: %u"
-                               " greater than DA_STATUS_MAX_SECTORS_MAX:"
-                               " %u\n", dev, fabric_max_sectors,
-                               DA_STATUS_MAX_SECTORS_MAX);
-                       return -EINVAL;
-               }
+       if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
+               pr_err("dev[%p]: Passed fabric_max_sectors: %u"
+                       " greater than DA_STATUS_MAX_SECTORS_MAX:"
+                       " %u\n", dev, fabric_max_sectors,
+                       DA_STATUS_MAX_SECTORS_MAX);
+               return -EINVAL;
        }
        /*
         * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
@@ -1155,6 +1146,7 @@ int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
                        dev, fabric_max_sectors);
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
 
 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
 {
@@ -1164,11 +1156,6 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
                        dev, dev->export_count);
                return -EINVAL;
        }
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-               pr_err("dev[%p]: Passed optimal_sectors cannot be"
-                               " changed for TCM/pSCSI\n", dev);
-               return -EINVAL;
-       }
        if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
                pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
                        " greater than fabric_max_sectors: %u\n", dev,
@@ -1181,6 +1168,7 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
                        dev, optimal_sectors);
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_optimal_sectors);
 
 int se_dev_set_block_size(struct se_device *dev, u32 block_size)
 {
@@ -1201,13 +1189,6 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
                return -EINVAL;
        }
 
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-               pr_err("dev[%p]: Not allowed to change block_size for"
-                       " Physical Device, use for Linux/SCSI to change"
-                       " block_size for underlying hardware\n", dev);
-               return -EINVAL;
-       }
-
        dev->dev_attrib.block_size = block_size;
        pr_debug("dev[%p]: SE Device block_size changed to %u\n",
                        dev, block_size);
@@ -1218,6 +1199,7 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
 
        return 0;
 }
+EXPORT_SYMBOL(se_dev_set_block_size);
 
 struct se_lun *core_dev_add_lun(
        struct se_portal_group *tpg,
index 72c83d98662bb7117d3911ab931940990a79f82d..c2aea099ea4adf7c0ee60ac7949fa02089162932 100644 (file)
@@ -37,6 +37,7 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
 
 #include "target_core_file.h"
 
@@ -934,6 +935,42 @@ fd_parse_cdb(struct se_cmd *cmd)
        return sbc_parse_cdb(cmd, &fd_sbc_ops);
 }
 
+DEF_TB_DEFAULT_ATTRIBS(fileio);
+
+static struct configfs_attribute *fileio_backend_dev_attrs[] = {
+       &fileio_dev_attrib_emulate_model_alias.attr,
+       &fileio_dev_attrib_emulate_dpo.attr,
+       &fileio_dev_attrib_emulate_fua_write.attr,
+       &fileio_dev_attrib_emulate_fua_read.attr,
+       &fileio_dev_attrib_emulate_write_cache.attr,
+       &fileio_dev_attrib_emulate_ua_intlck_ctrl.attr,
+       &fileio_dev_attrib_emulate_tas.attr,
+       &fileio_dev_attrib_emulate_tpu.attr,
+       &fileio_dev_attrib_emulate_tpws.attr,
+       &fileio_dev_attrib_emulate_caw.attr,
+       &fileio_dev_attrib_emulate_3pc.attr,
+       &fileio_dev_attrib_pi_prot_type.attr,
+       &fileio_dev_attrib_hw_pi_prot_type.attr,
+       &fileio_dev_attrib_pi_prot_format.attr,
+       &fileio_dev_attrib_enforce_pr_isids.attr,
+       &fileio_dev_attrib_is_nonrot.attr,
+       &fileio_dev_attrib_emulate_rest_reord.attr,
+       &fileio_dev_attrib_force_pr_aptpl.attr,
+       &fileio_dev_attrib_hw_block_size.attr,
+       &fileio_dev_attrib_block_size.attr,
+       &fileio_dev_attrib_hw_max_sectors.attr,
+       &fileio_dev_attrib_fabric_max_sectors.attr,
+       &fileio_dev_attrib_optimal_sectors.attr,
+       &fileio_dev_attrib_hw_queue_depth.attr,
+       &fileio_dev_attrib_queue_depth.attr,
+       &fileio_dev_attrib_max_unmap_lba_count.attr,
+       &fileio_dev_attrib_max_unmap_block_desc_count.attr,
+       &fileio_dev_attrib_unmap_granularity.attr,
+       &fileio_dev_attrib_unmap_granularity_alignment.attr,
+       &fileio_dev_attrib_max_write_same_len.attr,
+       NULL,
+};
+
 static struct se_subsystem_api fileio_template = {
        .name                   = "fileio",
        .inquiry_prod           = "FILEIO",
@@ -957,6 +994,11 @@ static struct se_subsystem_api fileio_template = {
 
 static int __init fileio_module_init(void)
 {
+       struct target_backend_cits *tbc = &fileio_template.tb_cits;
+
+       target_core_setup_sub_cits(&fileio_template);
+       tbc->tb_dev_attrib_cit.ct_attrs = fileio_backend_dev_attrs;
+
        return transport_subsystem_register(&fileio_template);
 }
 
index a25051a37dd78fe7c17b7a807eb882332df828be..ff95f95dcd13d571a59c70b7f6a7b1c6d0a6120d 100644 (file)
@@ -36,6 +36,7 @@
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
 #include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
 
 #include "target_core_internal.h"
 
@@ -137,8 +138,7 @@ core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
        return hba;
 
 out_module_put:
-       if (hba->transport->owner)
-               module_put(hba->transport->owner);
+       module_put(hba->transport->owner);
        hba->transport = NULL;
 out_free_hba:
        kfree(hba);
@@ -159,8 +159,7 @@ core_delete_hba(struct se_hba *hba)
        pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
                        " Core\n", hba->hba_id);
 
-       if (hba->transport->owner)
-               module_put(hba->transport->owner);
+       module_put(hba->transport->owner);
 
        hba->transport = NULL;
        kfree(hba);
index 7e6b857c6b3f18563a8325cb471780c39ade158f..3efff94fbd9788838f565218965d180b78a67604 100644 (file)
@@ -41,6 +41,7 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
 
 #include "target_core_iblock.h"
 
@@ -858,6 +859,42 @@ static bool iblock_get_write_cache(struct se_device *dev)
        return q->flush_flags & REQ_FLUSH;
 }
 
+DEF_TB_DEFAULT_ATTRIBS(iblock);
+
+static struct configfs_attribute *iblock_backend_dev_attrs[] = {
+       &iblock_dev_attrib_emulate_model_alias.attr,
+       &iblock_dev_attrib_emulate_dpo.attr,
+       &iblock_dev_attrib_emulate_fua_write.attr,
+       &iblock_dev_attrib_emulate_fua_read.attr,
+       &iblock_dev_attrib_emulate_write_cache.attr,
+       &iblock_dev_attrib_emulate_ua_intlck_ctrl.attr,
+       &iblock_dev_attrib_emulate_tas.attr,
+       &iblock_dev_attrib_emulate_tpu.attr,
+       &iblock_dev_attrib_emulate_tpws.attr,
+       &iblock_dev_attrib_emulate_caw.attr,
+       &iblock_dev_attrib_emulate_3pc.attr,
+       &iblock_dev_attrib_pi_prot_type.attr,
+       &iblock_dev_attrib_hw_pi_prot_type.attr,
+       &iblock_dev_attrib_pi_prot_format.attr,
+       &iblock_dev_attrib_enforce_pr_isids.attr,
+       &iblock_dev_attrib_is_nonrot.attr,
+       &iblock_dev_attrib_emulate_rest_reord.attr,
+       &iblock_dev_attrib_force_pr_aptpl.attr,
+       &iblock_dev_attrib_hw_block_size.attr,
+       &iblock_dev_attrib_block_size.attr,
+       &iblock_dev_attrib_hw_max_sectors.attr,
+       &iblock_dev_attrib_fabric_max_sectors.attr,
+       &iblock_dev_attrib_optimal_sectors.attr,
+       &iblock_dev_attrib_hw_queue_depth.attr,
+       &iblock_dev_attrib_queue_depth.attr,
+       &iblock_dev_attrib_max_unmap_lba_count.attr,
+       &iblock_dev_attrib_max_unmap_block_desc_count.attr,
+       &iblock_dev_attrib_unmap_granularity.attr,
+       &iblock_dev_attrib_unmap_granularity_alignment.attr,
+       &iblock_dev_attrib_max_write_same_len.attr,
+       NULL,
+};
+
 static struct se_subsystem_api iblock_template = {
        .name                   = "iblock",
        .inquiry_prod           = "IBLOCK",
@@ -883,6 +920,11 @@ static struct se_subsystem_api iblock_template = {
 
 static int __init iblock_module_init(void)
 {
+       struct target_backend_cits *tbc = &iblock_template.tb_cits;
+
+       target_core_setup_sub_cits(&iblock_template);
+       tbc->tb_dev_attrib_cit.ct_attrs = iblock_backend_dev_attrs;
+
        return transport_subsystem_register(&iblock_template);
 }
 
index e31f42f369fff45d6bfd9c260e7483787b646472..60381db9002622bd1c11e68ded07f78648e1fec2 100644 (file)
@@ -18,34 +18,6 @@ int  core_dev_export(struct se_device *, struct se_portal_group *,
                struct se_lun *);
 void   core_dev_unexport(struct se_device *, struct se_portal_group *,
                struct se_lun *);
-int    se_dev_set_task_timeout(struct se_device *, u32);
-int    se_dev_set_max_unmap_lba_count(struct se_device *, u32);
-int    se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
-int    se_dev_set_unmap_granularity(struct se_device *, u32);
-int    se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
-int    se_dev_set_max_write_same_len(struct se_device *, u32);
-int    se_dev_set_emulate_model_alias(struct se_device *, int);
-int    se_dev_set_emulate_dpo(struct se_device *, int);
-int    se_dev_set_emulate_fua_write(struct se_device *, int);
-int    se_dev_set_emulate_fua_read(struct se_device *, int);
-int    se_dev_set_emulate_write_cache(struct se_device *, int);
-int    se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
-int    se_dev_set_emulate_tas(struct se_device *, int);
-int    se_dev_set_emulate_tpu(struct se_device *, int);
-int    se_dev_set_emulate_tpws(struct se_device *, int);
-int    se_dev_set_emulate_caw(struct se_device *, int);
-int    se_dev_set_emulate_3pc(struct se_device *, int);
-int    se_dev_set_pi_prot_type(struct se_device *, int);
-int    se_dev_set_pi_prot_format(struct se_device *, int);
-int    se_dev_set_enforce_pr_isids(struct se_device *, int);
-int    se_dev_set_force_pr_aptpl(struct se_device *, int);
-int    se_dev_set_is_nonrot(struct se_device *, int);
-int    se_dev_set_emulate_rest_reord(struct se_device *dev, int);
-int    se_dev_set_queue_depth(struct se_device *, u32);
-int    se_dev_set_max_sectors(struct se_device *, u32);
-int    se_dev_set_fabric_max_sectors(struct se_device *, u32);
-int    se_dev_set_optimal_sectors(struct se_device *, u32);
-int    se_dev_set_block_size(struct se_device *, u32);
 struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
 void   core_dev_del_lun(struct se_portal_group *, struct se_lun *);
 struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
index 4c261c33cf55e62394db8b22cf312fdbb352d8f7..d56f2aaba9af9a6bb4b89d5c1080d426cba84e63 100644 (file)
@@ -76,7 +76,7 @@ enum preempt_type {
 };
 
 static void __core_scsi3_complete_pro_release(struct se_device *, struct se_node_acl *,
-                       struct t10_pr_registration *, int);
+                                             struct t10_pr_registration *, int, int);
 
 static sense_reason_t
 target_scsi2_reservation_check(struct se_cmd *cmd)
@@ -1177,7 +1177,7 @@ static int core_scsi3_check_implicit_release(
                 *    service action with the SERVICE ACTION RESERVATION KEY
                 *    field set to zero (see 5.7.11.3).
                 */
-               __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0);
+               __core_scsi3_complete_pro_release(dev, nacl, pr_reg, 0, 1);
                ret = 1;
                /*
                 * For 'All Registrants' reservation types, all existing
@@ -1219,7 +1219,8 @@ static void __core_scsi3_free_registration(
 
        pr_reg->pr_reg_deve->def_pr_registered = 0;
        pr_reg->pr_reg_deve->pr_res_key = 0;
-       list_del(&pr_reg->pr_reg_list);
+       if (!list_empty(&pr_reg->pr_reg_list))
+               list_del(&pr_reg->pr_reg_list);
        /*
         * Caller accessing *pr_reg using core_scsi3_locate_pr_reg(),
         * so call core_scsi3_put_pr_reg() to decrement our reference.
@@ -1271,6 +1272,7 @@ void core_scsi3_free_pr_reg_from_nacl(
 {
        struct t10_reservation *pr_tmpl = &dev->t10_pr;
        struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_res_holder;
+       bool free_reg = false;
        /*
         * If the passed se_node_acl matches the reservation holder,
         * release the reservation.
@@ -1278,13 +1280,18 @@ void core_scsi3_free_pr_reg_from_nacl(
        spin_lock(&dev->dev_reservation_lock);
        pr_res_holder = dev->dev_pr_res_holder;
        if ((pr_res_holder != NULL) &&
-           (pr_res_holder->pr_reg_nacl == nacl))
-               __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0);
+           (pr_res_holder->pr_reg_nacl == nacl)) {
+               __core_scsi3_complete_pro_release(dev, nacl, pr_res_holder, 0, 1);
+               free_reg = true;
+       }
        spin_unlock(&dev->dev_reservation_lock);
        /*
         * Release any registration associated with the struct se_node_acl.
         */
        spin_lock(&pr_tmpl->registration_lock);
+       if (pr_res_holder && free_reg)
+               __core_scsi3_free_registration(dev, pr_res_holder, NULL, 0);
+
        list_for_each_entry_safe(pr_reg, pr_reg_tmp,
                        &pr_tmpl->registration_list, pr_reg_list) {
 
@@ -1307,7 +1314,7 @@ void core_scsi3_free_all_registrations(
        if (pr_res_holder != NULL) {
                struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
                __core_scsi3_complete_pro_release(dev, pr_res_nacl,
-                               pr_res_holder, 0);
+                                                 pr_res_holder, 0, 0);
        }
        spin_unlock(&dev->dev_reservation_lock);
 
@@ -1429,14 +1436,12 @@ core_scsi3_decode_spec_i_port(
        struct target_core_fabric_ops *tmp_tf_ops;
        unsigned char *buf;
        unsigned char *ptr, *i_str = NULL, proto_ident, tmp_proto_ident;
-       char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+       char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
        sense_reason_t ret;
        u32 tpdl, tid_len = 0;
        int dest_local_nexus;
        u32 dest_rtpi = 0;
 
-       memset(dest_iport, 0, 64);
-
        local_se_deve = se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
        /*
         * Allocate a struct pr_transport_id_holder and setup the
@@ -2105,13 +2110,13 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
                /*
                 * sa_res_key=0 Unregister Reservation Key for registered I_T Nexus.
                 */
-               pr_holder = core_scsi3_check_implicit_release(
-                               cmd->se_dev, pr_reg);
+               type = pr_reg->pr_res_type;
+               pr_holder = core_scsi3_check_implicit_release(cmd->se_dev,
+                                                             pr_reg);
                if (pr_holder < 0) {
                        ret = TCM_RESERVATION_CONFLICT;
                        goto out;
                }
-               type = pr_reg->pr_res_type;
 
                spin_lock(&pr_tmpl->registration_lock);
                /*
@@ -2269,6 +2274,7 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
        spin_lock(&dev->dev_reservation_lock);
        pr_res_holder = dev->dev_pr_res_holder;
        if (pr_res_holder) {
+               int pr_res_type = pr_res_holder->pr_res_type;
                /*
                 * From spc4r17 Section 5.7.9: Reserving:
                 *
@@ -2279,7 +2285,9 @@ core_scsi3_pro_reserve(struct se_cmd *cmd, int type, int scope, u64 res_key)
                 * the logical unit, then the command shall be completed with
                 * RESERVATION CONFLICT status.
                 */
-               if (pr_res_holder != pr_reg) {
+               if ((pr_res_holder != pr_reg) &&
+                   (pr_res_type != PR_TYPE_WRITE_EXCLUSIVE_ALLREG) &&
+                   (pr_res_type != PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
                        struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
                        pr_err("SPC-3 PR: Attempted RESERVE from"
                                " [%s]: %s while reservation already held by"
@@ -2385,23 +2393,59 @@ static void __core_scsi3_complete_pro_release(
        struct se_device *dev,
        struct se_node_acl *se_nacl,
        struct t10_pr_registration *pr_reg,
-       int explicit)
+       int explicit,
+       int unreg)
 {
        struct target_core_fabric_ops *tfo = se_nacl->se_tpg->se_tpg_tfo;
        char i_buf[PR_REG_ISID_ID_LEN];
+       int pr_res_type = 0, pr_res_scope = 0;
 
        memset(i_buf, 0, PR_REG_ISID_ID_LEN);
        core_pr_dump_initiator_port(pr_reg, i_buf, PR_REG_ISID_ID_LEN);
        /*
         * Go ahead and release the current PR reservation holder.
+        * If an All Registrants reservation is currently active and
+        * a unregister operation is requested, replace the current
+        * dev_pr_res_holder with another active registration.
         */
-       dev->dev_pr_res_holder = NULL;
+       if (dev->dev_pr_res_holder) {
+               pr_res_type = dev->dev_pr_res_holder->pr_res_type;
+               pr_res_scope = dev->dev_pr_res_holder->pr_res_scope;
+               dev->dev_pr_res_holder->pr_res_type = 0;
+               dev->dev_pr_res_holder->pr_res_scope = 0;
+               dev->dev_pr_res_holder->pr_res_holder = 0;
+               dev->dev_pr_res_holder = NULL;
+       }
+       if (!unreg)
+               goto out;
 
-       pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
-               " reservation holder TYPE: %s ALL_TG_PT: %d\n",
-               tfo->get_fabric_name(), (explicit) ? "explicit" : "implicit",
-               core_scsi3_pr_dump_type(pr_reg->pr_res_type),
-               (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+       spin_lock(&dev->t10_pr.registration_lock);
+       list_del_init(&pr_reg->pr_reg_list);
+       /*
+        * If the I_T nexus is a reservation holder, the persistent reservation
+        * is of an all registrants type, and the I_T nexus is the last remaining
+        * registered I_T nexus, then the device server shall also release the
+        * persistent reservation.
+        */
+       if (!list_empty(&dev->t10_pr.registration_list) &&
+           ((pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
+            (pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))) {
+               dev->dev_pr_res_holder =
+                       list_entry(dev->t10_pr.registration_list.next,
+                                  struct t10_pr_registration, pr_reg_list);
+               dev->dev_pr_res_holder->pr_res_type = pr_res_type;
+               dev->dev_pr_res_holder->pr_res_scope = pr_res_scope;
+               dev->dev_pr_res_holder->pr_res_holder = 1;
+       }
+       spin_unlock(&dev->t10_pr.registration_lock);
+out:
+       if (!dev->dev_pr_res_holder) {
+               pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+                       " reservation holder TYPE: %s ALL_TG_PT: %d\n",
+                       tfo->get_fabric_name(), (explicit) ? "explicit" :
+                       "implicit", core_scsi3_pr_dump_type(pr_res_type),
+                       (pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
+       }
        pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
                tfo->get_fabric_name(), se_nacl->initiatorname,
                i_buf);
@@ -2532,7 +2576,7 @@ core_scsi3_emulate_pro_release(struct se_cmd *cmd, int type, int scope,
         *    server shall not establish a unit attention condition.
         */
        __core_scsi3_complete_pro_release(dev, se_sess->se_node_acl,
-                       pr_reg, 1);
+                                         pr_reg, 1, 0);
 
        spin_unlock(&dev->dev_reservation_lock);
 
@@ -2620,7 +2664,7 @@ core_scsi3_emulate_pro_clear(struct se_cmd *cmd, u64 res_key)
        if (pr_res_holder) {
                struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
                __core_scsi3_complete_pro_release(dev, pr_res_nacl,
-                       pr_res_holder, 0);
+                                                 pr_res_holder, 0, 0);
        }
        spin_unlock(&dev->dev_reservation_lock);
        /*
@@ -2679,7 +2723,7 @@ static void __core_scsi3_complete_pro_preempt(
         */
        if (dev->dev_pr_res_holder)
                __core_scsi3_complete_pro_release(dev, nacl,
-                               dev->dev_pr_res_holder, 0);
+                                                 dev->dev_pr_res_holder, 0, 0);
 
        dev->dev_pr_res_holder = pr_reg;
        pr_reg->pr_res_holder = 1;
@@ -2924,8 +2968,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
         */
        if (pr_reg_n != pr_res_holder)
                __core_scsi3_complete_pro_release(dev,
-                               pr_res_holder->pr_reg_nacl,
-                               dev->dev_pr_res_holder, 0);
+                                                 pr_res_holder->pr_reg_nacl,
+                                                 dev->dev_pr_res_holder, 0, 0);
        /*
         * b) Remove the registrations for all I_T nexuses identified
         *    by the SERVICE ACTION RESERVATION KEY field, except the
@@ -3059,7 +3103,7 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
        struct t10_reservation *pr_tmpl = &dev->t10_pr;
        unsigned char *buf;
        unsigned char *initiator_str;
-       char *iport_ptr = NULL, dest_iport[64], i_buf[PR_REG_ISID_ID_LEN];
+       char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
        u32 tid_len, tmp_tid_len;
        int new_reg = 0, type, scope, matching_iname;
        sense_reason_t ret;
@@ -3071,7 +3115,6 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
 
-       memset(dest_iport, 0, 64);
        memset(i_buf, 0, PR_REG_ISID_ID_LEN);
        se_tpg = se_sess->se_tpg;
        tf_ops = se_tpg->se_tpg_tfo;
@@ -3389,7 +3432,7 @@ after_iport_check:
         *    holder (i.e., the I_T nexus on which the
         */
        __core_scsi3_complete_pro_release(dev, pr_res_nacl,
-                       dev->dev_pr_res_holder, 0);
+                                         dev->dev_pr_res_holder, 0, 0);
        /*
         * g) Move the persistent reservation to the specified I_T nexus using
         *    the same scope and type as the persistent reservation released in
@@ -3837,7 +3880,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
        unsigned char *buf;
        u32 add_desc_len = 0, add_len = 0, desc_len, exp_desc_len;
        u32 off = 8; /* off into first Full Status descriptor */
-       int format_code = 0;
+       int format_code = 0, pr_res_type = 0, pr_res_scope = 0;
+       bool all_reg = false;
 
        if (cmd->data_length < 8) {
                pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
@@ -3854,6 +3898,19 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
        buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff);
        buf[3] = (dev->t10_pr.pr_generation & 0xff);
 
+       spin_lock(&dev->dev_reservation_lock);
+       if (dev->dev_pr_res_holder) {
+               struct t10_pr_registration *pr_holder = dev->dev_pr_res_holder;
+
+               if (pr_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG ||
+                   pr_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG) {
+                       all_reg = true;
+                       pr_res_type = pr_holder->pr_res_type;
+                       pr_res_scope = pr_holder->pr_res_scope;
+               }
+       }
+       spin_unlock(&dev->dev_reservation_lock);
+
        spin_lock(&pr_tmpl->registration_lock);
        list_for_each_entry_safe(pr_reg, pr_reg_tmp,
                        &pr_tmpl->registration_list, pr_reg_list) {
@@ -3901,14 +3958,20 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
                 * reservation holder for PR_HOLDER bit.
                 *
                 * Also, if this registration is the reservation
-                * holder, fill in SCOPE and TYPE in the next byte.
+                * holder or there is an All Registrants reservation
+                * active, fill in SCOPE and TYPE in the next byte.
                 */
                if (pr_reg->pr_res_holder) {
                        buf[off++] |= 0x01;
                        buf[off++] = (pr_reg->pr_res_scope & 0xf0) |
                                     (pr_reg->pr_res_type & 0x0f);
-               } else
+               } else if (all_reg) {
+                       buf[off++] |= 0x01;
+                       buf[off++] = (pr_res_scope & 0xf0) |
+                                    (pr_res_type & 0x0f);
+               } else {
                        off += 2;
+               }
 
                off += 4; /* Skip over reserved area */
                /*
index 7c8291f0bbbce5f519bce28b1c6db8d4269a62bd..1045dcd7bf651b679ee355a19401065d559976b1 100644 (file)
@@ -44,6 +44,7 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
 
 #include "target_core_alua.h"
 #include "target_core_pscsi.h"
@@ -1094,7 +1095,7 @@ pscsi_execute_cmd(struct se_cmd *cmd)
        req->retries = PS_RETRY;
 
        blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req,
-                       (cmd->sam_task_attr == MSG_HEAD_TAG),
+                       (cmd->sam_task_attr == TCM_HEAD_TAG),
                        pscsi_req_done);
 
        return 0;
@@ -1165,6 +1166,26 @@ static void pscsi_req_done(struct request *req, int uptodate)
        kfree(pt);
 }
 
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_pi_prot_type);
+TB_DEV_ATTR_RO(pscsi, hw_pi_prot_type);
+
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_block_size);
+TB_DEV_ATTR_RO(pscsi, hw_block_size);
+
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_max_sectors);
+TB_DEV_ATTR_RO(pscsi, hw_max_sectors);
+
+DEF_TB_DEV_ATTRIB_RO(pscsi, hw_queue_depth);
+TB_DEV_ATTR_RO(pscsi, hw_queue_depth);
+
+static struct configfs_attribute *pscsi_backend_dev_attrs[] = {
+       &pscsi_dev_attrib_hw_pi_prot_type.attr,
+       &pscsi_dev_attrib_hw_block_size.attr,
+       &pscsi_dev_attrib_hw_max_sectors.attr,
+       &pscsi_dev_attrib_hw_queue_depth.attr,
+       NULL,
+};
+
 static struct se_subsystem_api pscsi_template = {
        .name                   = "pscsi",
        .owner                  = THIS_MODULE,
@@ -1185,6 +1206,11 @@ static struct se_subsystem_api pscsi_template = {
 
 static int __init pscsi_module_init(void)
 {
+       struct target_backend_cits *tbc = &pscsi_template.tb_cits;
+
+       target_core_setup_sub_cits(&pscsi_template);
+       tbc->tb_dev_attrib_cit.ct_attrs = pscsi_backend_dev_attrs;
+
        return transport_subsystem_register(&pscsi_template);
 }
 
index b920db3388cdc19d4bef721a6c6d55680cfd2d8c..60ebd170a561943be8bde26cd40112bbd22d1e8e 100644 (file)
@@ -34,6 +34,7 @@
 
 #include <target/target_core_base.h>
 #include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
 
 #include "target_core_rd.h"
 
@@ -632,6 +633,42 @@ rd_parse_cdb(struct se_cmd *cmd)
        return sbc_parse_cdb(cmd, &rd_sbc_ops);
 }
 
+DEF_TB_DEFAULT_ATTRIBS(rd_mcp);
+
+static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
+       &rd_mcp_dev_attrib_emulate_model_alias.attr,
+       &rd_mcp_dev_attrib_emulate_dpo.attr,
+       &rd_mcp_dev_attrib_emulate_fua_write.attr,
+       &rd_mcp_dev_attrib_emulate_fua_read.attr,
+       &rd_mcp_dev_attrib_emulate_write_cache.attr,
+       &rd_mcp_dev_attrib_emulate_ua_intlck_ctrl.attr,
+       &rd_mcp_dev_attrib_emulate_tas.attr,
+       &rd_mcp_dev_attrib_emulate_tpu.attr,
+       &rd_mcp_dev_attrib_emulate_tpws.attr,
+       &rd_mcp_dev_attrib_emulate_caw.attr,
+       &rd_mcp_dev_attrib_emulate_3pc.attr,
+       &rd_mcp_dev_attrib_pi_prot_type.attr,
+       &rd_mcp_dev_attrib_hw_pi_prot_type.attr,
+       &rd_mcp_dev_attrib_pi_prot_format.attr,
+       &rd_mcp_dev_attrib_enforce_pr_isids.attr,
+       &rd_mcp_dev_attrib_is_nonrot.attr,
+       &rd_mcp_dev_attrib_emulate_rest_reord.attr,
+       &rd_mcp_dev_attrib_force_pr_aptpl.attr,
+       &rd_mcp_dev_attrib_hw_block_size.attr,
+       &rd_mcp_dev_attrib_block_size.attr,
+       &rd_mcp_dev_attrib_hw_max_sectors.attr,
+       &rd_mcp_dev_attrib_fabric_max_sectors.attr,
+       &rd_mcp_dev_attrib_optimal_sectors.attr,
+       &rd_mcp_dev_attrib_hw_queue_depth.attr,
+       &rd_mcp_dev_attrib_queue_depth.attr,
+       &rd_mcp_dev_attrib_max_unmap_lba_count.attr,
+       &rd_mcp_dev_attrib_max_unmap_block_desc_count.attr,
+       &rd_mcp_dev_attrib_unmap_granularity.attr,
+       &rd_mcp_dev_attrib_unmap_granularity_alignment.attr,
+       &rd_mcp_dev_attrib_max_write_same_len.attr,
+       NULL,
+};
+
 static struct se_subsystem_api rd_mcp_template = {
        .name                   = "rd_mcp",
        .inquiry_prod           = "RAMDISK-MCP",
@@ -653,8 +690,12 @@ static struct se_subsystem_api rd_mcp_template = {
 
 int __init rd_module_init(void)
 {
+       struct target_backend_cits *tbc = &rd_mcp_template.tb_cits;
        int ret;
 
+       target_core_setup_sub_cits(&rd_mcp_template);
+       tbc->tb_dev_attrib_cit.ct_attrs = rd_mcp_backend_dev_attrs;
+
        ret = transport_subsystem_register(&rd_mcp_template);
        if (ret < 0) {
                return ret;
index 8d171ff77e75d48f4ba443f5f896c44f6453f292..11bea1952435a397172ce69804a1088567bde03f 100644 (file)
@@ -485,7 +485,7 @@ static sense_reason_t compare_and_write_callback(struct se_cmd *cmd)
        cmd->t_data_nents_orig = cmd->t_data_nents;
        cmd->t_data_nents = 1;
 
-       cmd->sam_task_attr = MSG_HEAD_TAG;
+       cmd->sam_task_attr = TCM_HEAD_TAG;
        cmd->transport_complete_callback = compare_and_write_post;
        /*
         * Now reset ->execute_cmd() to the normal sbc_execute_rw() handler
index bc286a67af7c4e876e0ac6075e8af8fffe2b9c92..1307600fe7264cb55234b6b8e88d8cc15878c799 100644 (file)
@@ -1357,7 +1357,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
                 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
                 * See spc4r17 section 5.3
                 */
-               cmd->sam_task_attr = MSG_HEAD_TAG;
+               cmd->sam_task_attr = TCM_HEAD_TAG;
                cmd->execute_cmd = spc_emulate_inquiry;
                break;
        case SECURITY_PROTOCOL_IN:
@@ -1391,7 +1391,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
                 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
                 * See spc4r17 section 5.3
                 */
-               cmd->sam_task_attr = MSG_HEAD_TAG;
+               cmd->sam_task_attr = TCM_HEAD_TAG;
                break;
        case TEST_UNIT_READY:
                cmd->execute_cmd = spc_emulate_testunitready;
index be877bf6f7304a88fabb3506d65ac75542471b01..0adc0f6502134eb3292a7a2cab66ac75be4d6406 100644 (file)
@@ -1159,7 +1159,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
                return 0;
 
-       if (cmd->sam_task_attr == MSG_ACA_TAG) {
+       if (cmd->sam_task_attr == TCM_ACA_TAG) {
                pr_debug("SAM Task Attribute ACA"
                        " emulation is not supported\n");
                return TCM_INVALID_CDB_FIELD;
@@ -1531,7 +1531,7 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
        BUG_ON(!se_tpg);
 
        transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
-                             0, DMA_NONE, MSG_SIMPLE_TAG, sense);
+                             0, DMA_NONE, TCM_SIMPLE_TAG, sense);
        /*
         * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
         * allocation failure.
@@ -1718,12 +1718,12 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
         * to allow the passed struct se_cmd list of tasks to the front of the list.
         */
        switch (cmd->sam_task_attr) {
-       case MSG_HEAD_TAG:
+       case TCM_HEAD_TAG:
                pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
                         "se_ordered_id: %u\n",
                         cmd->t_task_cdb[0], cmd->se_ordered_id);
                return false;
-       case MSG_ORDERED_TAG:
+       case TCM_ORDERED_TAG:
                atomic_inc_mb(&dev->dev_ordered_sync);
 
                pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
@@ -1828,7 +1828,7 @@ static void target_restart_delayed_cmds(struct se_device *dev)
 
                __target_execute_cmd(cmd);
 
-               if (cmd->sam_task_attr == MSG_ORDERED_TAG)
+               if (cmd->sam_task_attr == TCM_ORDERED_TAG)
                        break;
        }
 }
@@ -1844,18 +1844,18 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
        if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
                return;
 
-       if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
+       if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
                atomic_dec_mb(&dev->simple_cmds);
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
                        " SIMPLE: %u\n", dev->dev_cur_ordered_id,
                        cmd->se_ordered_id);
-       } else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
+       } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for"
                        " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
                        cmd->se_ordered_id);
-       } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
+       } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
                atomic_dec_mb(&dev->dev_ordered_sync);
 
                dev->dev_cur_ordered_id++;
index 9a1b314f64824e4d6d0dcd3f1dcb56968832de2e..8bfa61c9693dbef6fe6267e16a48c566eae6ac4a 100644 (file)
@@ -28,6 +28,8 @@
 #include <target/target_core_base.h>
 #include <target/target_core_fabric.h>
 #include <target/target_core_backend.h>
+#include <target/target_core_backend_configfs.h>
+
 #include <linux/target_core_user.h>
 
 /*
@@ -1092,6 +1094,42 @@ tcmu_parse_cdb(struct se_cmd *cmd)
        return ret;
 }
 
+DEF_TB_DEFAULT_ATTRIBS(tcmu);
+
+static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
+       &tcmu_dev_attrib_emulate_model_alias.attr,
+       &tcmu_dev_attrib_emulate_dpo.attr,
+       &tcmu_dev_attrib_emulate_fua_write.attr,
+       &tcmu_dev_attrib_emulate_fua_read.attr,
+       &tcmu_dev_attrib_emulate_write_cache.attr,
+       &tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr,
+       &tcmu_dev_attrib_emulate_tas.attr,
+       &tcmu_dev_attrib_emulate_tpu.attr,
+       &tcmu_dev_attrib_emulate_tpws.attr,
+       &tcmu_dev_attrib_emulate_caw.attr,
+       &tcmu_dev_attrib_emulate_3pc.attr,
+       &tcmu_dev_attrib_pi_prot_type.attr,
+       &tcmu_dev_attrib_hw_pi_prot_type.attr,
+       &tcmu_dev_attrib_pi_prot_format.attr,
+       &tcmu_dev_attrib_enforce_pr_isids.attr,
+       &tcmu_dev_attrib_is_nonrot.attr,
+       &tcmu_dev_attrib_emulate_rest_reord.attr,
+       &tcmu_dev_attrib_force_pr_aptpl.attr,
+       &tcmu_dev_attrib_hw_block_size.attr,
+       &tcmu_dev_attrib_block_size.attr,
+       &tcmu_dev_attrib_hw_max_sectors.attr,
+       &tcmu_dev_attrib_fabric_max_sectors.attr,
+       &tcmu_dev_attrib_optimal_sectors.attr,
+       &tcmu_dev_attrib_hw_queue_depth.attr,
+       &tcmu_dev_attrib_queue_depth.attr,
+       &tcmu_dev_attrib_max_unmap_lba_count.attr,
+       &tcmu_dev_attrib_max_unmap_block_desc_count.attr,
+       &tcmu_dev_attrib_unmap_granularity.attr,
+       &tcmu_dev_attrib_unmap_granularity_alignment.attr,
+       &tcmu_dev_attrib_max_write_same_len.attr,
+       NULL,
+};
+
 static struct se_subsystem_api tcmu_template = {
        .name                   = "user",
        .inquiry_prod           = "USER",
@@ -1112,6 +1150,7 @@ static struct se_subsystem_api tcmu_template = {
 
 static int __init tcmu_module_init(void)
 {
+       struct target_backend_cits *tbc = &tcmu_template.tb_cits;
        int ret;
 
        BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
@@ -1134,6 +1173,9 @@ static int __init tcmu_module_init(void)
                goto out_unreg_device;
        }
 
+       target_core_setup_sub_cits(&tcmu_template);
+       tbc->tb_dev_attrib_cit.ct_attrs = tcmu_backend_dev_attrs;
+
        ret = transport_subsystem_register(&tcmu_template);
        if (ret)
                goto out_unreg_genl;
index be0c0d08c56a91ff9acc97e8db92868c39c25f0c..edcafa4490c0850bfc01115357ce4e817773ee72 100644 (file)
@@ -554,17 +554,17 @@ static void ft_send_work(struct work_struct *work)
         */
        switch (fcp->fc_pri_ta & FCP_PTA_MASK) {
        case FCP_PTA_HEADQ:
-               task_attr = MSG_HEAD_TAG;
+               task_attr = TCM_HEAD_TAG;
                break;
        case FCP_PTA_ORDERED:
-               task_attr = MSG_ORDERED_TAG;
+               task_attr = TCM_ORDERED_TAG;
                break;
        case FCP_PTA_ACA:
-               task_attr = MSG_ACA_TAG;
+               task_attr = TCM_ACA_TAG;
                break;
        case FCP_PTA_SIMPLE: /* Fallthrough */
        default:
-               task_attr = MSG_SIMPLE_TAG;
+               task_attr = TCM_SIMPLE_TAG;
        }
 
        fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
index ad09e51ffae4d097109241d9a19b97c97858109b..f65f0d109fc8c015869a78b355478afb153fde5d 100644 (file)
@@ -4,6 +4,8 @@
  *  Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
  *  Copyright (C) 2012  Amit Daniel <amit.kachhap@linaro.org>
  *
+ *  Copyright (C) 2014  Viresh Kumar <viresh.kumar@linaro.org>
+ *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *  This program is free software; you can redistribute it and/or modify
  *  it under the terms of the GNU General Public License as published by
 #include <linux/cpu.h>
 #include <linux/cpu_cooling.h>
 
+/*
+ * Cooling state <-> CPUFreq frequency
+ *
+ * Cooling states are translated to frequencies throughout this driver and this
+ * is the relation between them.
+ *
+ * Highest cooling state corresponds to lowest possible frequency.
+ *
+ * i.e.
+ *     level 0 --> 1st Max Freq
+ *     level 1 --> 2nd Max Freq
+ *     ...
+ */
+
 /**
  * struct cpufreq_cooling_device - data for cooling device with cpufreq
  * @id: unique integer value corresponding to each cpufreq_cooling_device
  *     cooling devices.
  * @cpufreq_val: integer value representing the absolute value of the clipped
  *     frequency.
+ * @max_level: maximum cooling level. One less than total number of valid
+ *     cpufreq frequencies.
  * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
+ * @node: list_head to link all cpufreq_cooling_device together.
  *
- * This structure is required for keeping information of each
- * cpufreq_cooling_device registered. In order to prevent corruption of this a
- * mutex lock cooling_cpufreq_lock is used.
+ * This structure is required for keeping information of each registered
+ * cpufreq_cooling_device.
  */
 struct cpufreq_cooling_device {
        int id;
        struct thermal_cooling_device *cool_dev;
        unsigned int cpufreq_state;
        unsigned int cpufreq_val;
+       unsigned int max_level;
+       unsigned int *freq_table;       /* In descending order */
        struct cpumask allowed_cpus;
        struct list_head node;
 };
 static DEFINE_IDR(cpufreq_idr);
 static DEFINE_MUTEX(cooling_cpufreq_lock);
 
-static unsigned int cpufreq_dev_count;
-
 static LIST_HEAD(cpufreq_dev_list);
 
 /**
@@ -98,120 +116,30 @@ static void release_idr(struct idr *idr, int id)
 /* Below code defines functions to be used for cpufreq as cooling device */
 
 /**
- * is_cpufreq_valid - function to check frequency transitioning capability.
- * @cpu: cpu for which check is needed.
+ * get_level: Find the level for a particular frequency
+ * @cpufreq_dev: cpufreq_dev for which the property is required
+ * @freq: Frequency
  *
- * This function will check the current state of the system if
- * it is capable of changing the frequency for a given @cpu.
- *
- * Return: 0 if the system is not currently capable of changing
- * the frequency of given cpu. !0 in case the frequency is changeable.
+ * Return: level on success, THERMAL_CSTATE_INVALID on error.
  */
-static int is_cpufreq_valid(int cpu)
+static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_dev,
+                              unsigned int freq)
 {
-       struct cpufreq_policy policy;
-
-       return !cpufreq_get_policy(&policy, cpu);
-}
-
-enum cpufreq_cooling_property {
-       GET_LEVEL,
-       GET_FREQ,
-       GET_MAXL,
-};
-
-/**
- * get_property - fetch a property of interest for a give cpu.
- * @cpu: cpu for which the property is required
- * @input: query parameter
- * @output: query return
- * @property: type of query (frequency, level, max level)
- *
- * This is the common function to
- * 1. get maximum cpu cooling states
- * 2. translate frequency to cooling state
- * 3. translate cooling state to frequency
- * Note that the code may be not in good shape
- * but it is written in this way in order to:
- * a) reduce duplicate code as most of the code can be shared.
- * b) make sure the logic is consistent when translating between
- *    cooling states and frequencies.
- *
- * Return: 0 on success, -EINVAL when invalid parameters are passed.
- */
-static int get_property(unsigned int cpu, unsigned long input,
-                       unsigned int *output,
-                       enum cpufreq_cooling_property property)
-{
-       int i;
-       unsigned long max_level = 0, level = 0;
-       unsigned int freq = CPUFREQ_ENTRY_INVALID;
-       int descend = -1;
-       struct cpufreq_frequency_table *pos, *table =
-                                       cpufreq_frequency_get_table(cpu);
-
-       if (!output)
-               return -EINVAL;
-
-       if (!table)
-               return -EINVAL;
-
-       cpufreq_for_each_valid_entry(pos, table) {
-               /* ignore duplicate entry */
-               if (freq == pos->frequency)
-                       continue;
-
-               /* get the frequency order */
-               if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
-                       descend = freq > pos->frequency;
-
-               freq = pos->frequency;
-               max_level++;
-       }
-
-       /* No valid cpu frequency entry */
-       if (max_level == 0)
-               return -EINVAL;
+       unsigned long level;
 
-       /* max_level is an index, not a counter */
-       max_level--;
+       for (level = 0; level <= cpufreq_dev->max_level; level++) {
+               if (freq == cpufreq_dev->freq_table[level])
+                       return level;
 
-       /* get max level */
-       if (property == GET_MAXL) {
-               *output = (unsigned int)max_level;
-               return 0;
+               if (freq > cpufreq_dev->freq_table[level])
+                       break;
        }
 
-       if (property == GET_FREQ)
-               level = descend ? input : (max_level - input);
-
-       i = 0;
-       cpufreq_for_each_valid_entry(pos, table) {
-               /* ignore duplicate entry */
-               if (freq == pos->frequency)
-                       continue;
-
-               /* now we have a valid frequency entry */
-               freq = pos->frequency;
-
-               if (property == GET_LEVEL && (unsigned int)input == freq) {
-                       /* get level by frequency */
-                       *output = descend ? i : (max_level - i);
-                       return 0;
-               }
-               if (property == GET_FREQ && level == i) {
-                       /* get frequency by level */
-                       *output = freq;
-                       return 0;
-               }
-               i++;
-       }
-
-       return -EINVAL;
+       return THERMAL_CSTATE_INVALID;
 }
 
 /**
- * cpufreq_cooling_get_level - for a give cpu, return the cooling level.
+ * cpufreq_cooling_get_level - for a given cpu, return the cooling level.
  * @cpu: cpu for which the level is required
  * @freq: the frequency of interest
  *
@@ -223,77 +151,21 @@ static int get_property(unsigned int cpu, unsigned long input,
  */
 unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
 {
-       unsigned int val;
-
-       if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL))
-               return THERMAL_CSTATE_INVALID;
-
-       return (unsigned long)val;
-}
-EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
-
-/**
- * get_cpu_frequency - get the absolute value of frequency from level.
- * @cpu: cpu for which frequency is fetched.
- * @level: cooling level
- *
- * This function matches cooling level with frequency. Based on a cooling level
- * of frequency, equals cooling state of cpu cooling device, it will return
- * the corresponding frequency.
- *     e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
- *
- * Return: 0 on error, the corresponding frequency otherwise.
- */
-static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
-{
-       int ret = 0;
-       unsigned int freq;
-
-       ret = get_property(cpu, level, &freq, GET_FREQ);
-       if (ret)
-               return 0;
-
-       return freq;
-}
-
-/**
- * cpufreq_apply_cooling - function to apply frequency clipping.
- * @cpufreq_device: cpufreq_cooling_device pointer containing frequency
- *     clipping data.
- * @cooling_state: value of the cooling state.
- *
- * Function used to make sure the cpufreq layer is aware of current thermal
- * limits. The limits are applied by updating the cpufreq policy.
- *
- * Return: 0 on success, an error code otherwise (-EINVAL in case wrong
- * cooling state).
- */
-static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
-                                unsigned long cooling_state)
-{
-       unsigned int cpuid, clip_freq;
-       struct cpumask *mask = &cpufreq_device->allowed_cpus;
-       unsigned int cpu = cpumask_any(mask);
-
-
-       /* Check if the old cooling action is same as new cooling action */
-       if (cpufreq_device->cpufreq_state == cooling_state)
-               return 0;
-
-       clip_freq = get_cpu_frequency(cpu, cooling_state);
-       if (!clip_freq)
-               return -EINVAL;
-
-       cpufreq_device->cpufreq_state = cooling_state;
-       cpufreq_device->cpufreq_val = clip_freq;
+       struct cpufreq_cooling_device *cpufreq_dev;
 
-       for_each_cpu(cpuid, mask) {
-               if (is_cpufreq_valid(cpuid))
-                       cpufreq_update_policy(cpuid);
+       mutex_lock(&cooling_cpufreq_lock);
+       list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+               if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
+                       mutex_unlock(&cooling_cpufreq_lock);
+                       return get_level(cpufreq_dev, freq);
+               }
        }
+       mutex_unlock(&cooling_cpufreq_lock);
 
-       return 0;
+       pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
+       return THERMAL_CSTATE_INVALID;
 }
+EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
 
 /**
  * cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
@@ -323,11 +195,6 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
                                        &cpufreq_dev->allowed_cpus))
                        continue;
 
-               if (!cpufreq_dev->cpufreq_val)
-                       cpufreq_dev->cpufreq_val = get_cpu_frequency(
-                                       cpumask_any(&cpufreq_dev->allowed_cpus),
-                                       cpufreq_dev->cpufreq_state);
-
                max_freq = cpufreq_dev->cpufreq_val;
 
                if (policy->max != max_freq)
@@ -354,19 +221,9 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
                                 unsigned long *state)
 {
        struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
-       struct cpumask *mask = &cpufreq_device->allowed_cpus;
-       unsigned int cpu;
-       unsigned int count = 0;
-       int ret;
-
-       cpu = cpumask_any(mask);
-
-       ret = get_property(cpu, 0, &count, GET_MAXL);
 
-       if (count > 0)
-               *state = count;
-
-       return ret;
+       *state = cpufreq_device->max_level;
+       return 0;
 }
 
 /**
@@ -403,8 +260,24 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
                                 unsigned long state)
 {
        struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+       unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
+       unsigned int clip_freq;
+
+       /* Request state should be less than max_level */
+       if (WARN_ON(state > cpufreq_device->max_level))
+               return -EINVAL;
+
+       /* Check if the old cooling action is same as new cooling action */
+       if (cpufreq_device->cpufreq_state == state)
+               return 0;
 
-       return cpufreq_apply_cooling(cpufreq_device, state);
+       clip_freq = cpufreq_device->freq_table[state];
+       cpufreq_device->cpufreq_state = state;
+       cpufreq_device->cpufreq_val = clip_freq;
+
+       cpufreq_update_policy(cpu);
+
+       return 0;
 }
 
 /* Bind cpufreq callbacks to thermal cooling device ops */
@@ -419,10 +292,25 @@ static struct notifier_block thermal_cpufreq_notifier_block = {
        .notifier_call = cpufreq_thermal_notifier,
 };
 
+static unsigned int find_next_max(struct cpufreq_frequency_table *table,
+                                 unsigned int prev_max)
+{
+       struct cpufreq_frequency_table *pos;
+       unsigned int max = 0;
+
+       cpufreq_for_each_valid_entry(pos, table) {
+               if (pos->frequency > max && pos->frequency < prev_max)
+                       max = pos->frequency;
+       }
+
+       return max;
+}
+
 /**
  * __cpufreq_cooling_register - helper function to create cpufreq cooling device
  * @np: a valid struct device_node to the cooling device device tree node
  * @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ * Normally this should be same as cpufreq policy->related_cpus.
  *
  * This interface function registers the cpufreq cooling device with the name
  * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -437,37 +325,42 @@ __cpufreq_cooling_register(struct device_node *np,
                           const struct cpumask *clip_cpus)
 {
        struct thermal_cooling_device *cool_dev;
-       struct cpufreq_cooling_device *cpufreq_dev = NULL;
-       unsigned int min = 0, max = 0;
+       struct cpufreq_cooling_device *cpufreq_dev;
        char dev_name[THERMAL_NAME_LENGTH];
-       int ret = 0, i;
-       struct cpufreq_policy policy;
+       struct cpufreq_frequency_table *pos, *table;
+       unsigned int freq, i;
+       int ret;
 
-       /* Verify that all the clip cpus have same freq_min, freq_max limit */
-       for_each_cpu(i, clip_cpus) {
-               /* continue if cpufreq policy not found and not return error */
-               if (!cpufreq_get_policy(&policy, i))
-                       continue;
-               if (min == 0 && max == 0) {
-                       min = policy.cpuinfo.min_freq;
-                       max = policy.cpuinfo.max_freq;
-               } else {
-                       if (min != policy.cpuinfo.min_freq ||
-                           max != policy.cpuinfo.max_freq)
-                               return ERR_PTR(-EINVAL);
-               }
+       table = cpufreq_frequency_get_table(cpumask_first(clip_cpus));
+       if (!table) {
+               pr_debug("%s: CPUFreq table not found\n", __func__);
+               return ERR_PTR(-EPROBE_DEFER);
        }
-       cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
-                             GFP_KERNEL);
+
+       cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
        if (!cpufreq_dev)
                return ERR_PTR(-ENOMEM);
 
+       /* Find max levels */
+       cpufreq_for_each_valid_entry(pos, table)
+               cpufreq_dev->max_level++;
+
+       cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) *
+                                         cpufreq_dev->max_level, GFP_KERNEL);
+       if (!cpufreq_dev->freq_table) {
+               cool_dev = ERR_PTR(-ENOMEM);
+               goto free_cdev;
+       }
+
+       /* max_level is an index, not a counter */
+       cpufreq_dev->max_level--;
+
        cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
 
        ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
        if (ret) {
-               kfree(cpufreq_dev);
-               return ERR_PTR(-EINVAL);
+               cool_dev = ERR_PTR(ret);
+               goto free_table;
        }
 
        snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
@@ -475,24 +368,43 @@ __cpufreq_cooling_register(struct device_node *np,
 
        cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
                                                      &cpufreq_cooling_ops);
-       if (IS_ERR(cool_dev)) {
-               release_idr(&cpufreq_idr, cpufreq_dev->id);
-               kfree(cpufreq_dev);
-               return cool_dev;
+       if (IS_ERR(cool_dev))
+               goto remove_idr;
+
+       /* Fill freq-table in descending order of frequencies */
+       for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
+               freq = find_next_max(table, freq);
+               cpufreq_dev->freq_table[i] = freq;
+
+               /* Warn for duplicate entries */
+               if (!freq)
+                       pr_warn("%s: table has duplicate entries\n", __func__);
+               else
+                       pr_debug("%s: freq:%u KHz\n", __func__, freq);
        }
+
+       cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
        cpufreq_dev->cool_dev = cool_dev;
-       cpufreq_dev->cpufreq_state = 0;
+
        mutex_lock(&cooling_cpufreq_lock);
 
        /* Register the notifier for first cpufreq cooling device */
-       if (cpufreq_dev_count == 0)
+       if (list_empty(&cpufreq_dev_list))
                cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
                                          CPUFREQ_POLICY_NOTIFIER);
-       cpufreq_dev_count++;
        list_add(&cpufreq_dev->node, &cpufreq_dev_list);
 
        mutex_unlock(&cooling_cpufreq_lock);
 
+       return cool_dev;
+
+remove_idr:
+       release_idr(&cpufreq_idr, cpufreq_dev->id);
+free_table:
+       kfree(cpufreq_dev->freq_table);
+free_cdev:
+       kfree(cpufreq_dev);
+
        return cool_dev;
 }
 
@@ -554,16 +466,16 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
        cpufreq_dev = cdev->devdata;
        mutex_lock(&cooling_cpufreq_lock);
        list_del(&cpufreq_dev->node);
-       cpufreq_dev_count--;
 
        /* Unregister the notifier for the last cpufreq cooling device */
-       if (cpufreq_dev_count == 0)
+       if (list_empty(&cpufreq_dev_list))
                cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);
        mutex_unlock(&cooling_cpufreq_lock);
 
        thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
        release_idr(&cpufreq_idr, cpufreq_dev->id);
+       kfree(cpufreq_dev->freq_table);
        kfree(cpufreq_dev);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
index 000d53e934a0600b7570de851dc08510240208d1..607b62c7e6114cc005ccf597e4f0ccb804e0e99b 100644 (file)
@@ -18,7 +18,6 @@
  */
 
 #include <linux/cpu_cooling.h>
-#include <linux/cpufreq.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/of.h>
 static int db8500_cpufreq_cooling_probe(struct platform_device *pdev)
 {
        struct thermal_cooling_device *cdev;
-       struct cpumask mask_val;
-
-       /* make sure cpufreq driver has been initialized */
-       if (!cpufreq_frequency_get_table(0))
-               return -EPROBE_DEFER;
-
-       cpumask_set_cpu(0, &mask_val);
-       cdev = cpufreq_cooling_register(&mask_val);
 
+       cdev = cpufreq_cooling_register(cpu_present_mask);
        if (IS_ERR(cdev)) {
-               dev_err(&pdev->dev, "Failed to register cooling device\n");
-               return PTR_ERR(cdev);
+               int ret = PTR_ERR(cdev);
+
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "Failed to register cooling device %d\n",
+                               ret);
+                               
+               return ret;
        }
 
        platform_set_drvdata(pdev, cdev);
index 88b32f942dcf72839304dc32db3c10ccf681ee73..c1188ac053c9650ce163a3e9e37f34a877959dc2 100644 (file)
@@ -9,7 +9,6 @@
 
 #include <linux/clk.h>
 #include <linux/cpu_cooling.h>
-#include <linux/cpufreq.h>
 #include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/init.h>
@@ -454,15 +453,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
        const struct of_device_id *of_id =
                of_match_device(of_imx_thermal_match, &pdev->dev);
        struct imx_thermal_data *data;
-       struct cpumask clip_cpus;
        struct regmap *map;
        int measure_freq;
        int ret;
 
-       if (!cpufreq_get_current_driver()) {
-               dev_dbg(&pdev->dev, "no cpufreq driver!");
-               return -EPROBE_DEFER;
-       }
        data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
        if (!data)
                return -ENOMEM;
@@ -516,12 +510,13 @@ static int imx_thermal_probe(struct platform_device *pdev)
        regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF);
        regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
 
-       cpumask_set_cpu(0, &clip_cpus);
-       data->cdev = cpufreq_cooling_register(&clip_cpus);
+       data->cdev = cpufreq_cooling_register(cpu_present_mask);
        if (IS_ERR(data->cdev)) {
                ret = PTR_ERR(data->cdev);
-               dev_err(&pdev->dev,
-                       "failed to register cpufreq cooling device: %d\n", ret);
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "failed to register cpufreq cooling device: %d\n",
+                               ret);
                return ret;
        }
 
index ffe40bffaf1a2e88acf818b4f611ca7a9e57f42f..d4413698a85f9738d226d5f82793e9d3c6f46abe 100644 (file)
@@ -1,4 +1,5 @@
 obj-$(CONFIG_INT340X_THERMAL)  += int3400_thermal.o
 obj-$(CONFIG_INT340X_THERMAL)  += int3402_thermal.o
 obj-$(CONFIG_INT340X_THERMAL)  += int3403_thermal.o
+obj-$(CONFIG_INT340X_THERMAL)  += processor_thermal_device.o
 obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o
index e4e61b3fb11e8b101abbf86cd1b58f0ed5ffb235..231cabc16e160e7318b4339a9d3d5210ff8b2ec3 100644 (file)
@@ -82,7 +82,7 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
        struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" };
 
        if (!acpi_has_method(handle, "_TRT"))
-               return 0;
+               return -ENODEV;
 
        status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer);
        if (ACPI_FAILURE(status))
@@ -167,7 +167,7 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
                sizeof("RRNNNNNNNNNNN"), "RRNNNNNNNNNNN" };
 
        if (!acpi_has_method(handle, "_ART"))
-               return 0;
+               return -ENODEV;
 
        status = acpi_evaluate_object(handle, "_ART", NULL, &buffer);
        if (ACPI_FAILURE(status))
@@ -321,8 +321,8 @@ static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
        unsigned long length = 0;
        int count = 0;
        char __user *arg = (void __user *)__arg;
-       struct trt *trts;
-       struct art *arts;
+       struct trt *trts = NULL;
+       struct art *arts = NULL;
 
        switch (cmd) {
        case ACPI_THERMAL_GET_TRT_COUNT:
index dcb306ea14a49008be5df0e3abb521e896d9195e..65a98a97df071cdf343776bc1e959dc9808dbbc4 100644 (file)
@@ -335,7 +335,6 @@ static struct platform_driver int3400_thermal_driver = {
        .remove = int3400_thermal_remove,
        .driver = {
                   .name = "int3400 thermal",
-                  .owner = THIS_MODULE,
                   .acpi_match_table = ACPI_PTR(int3400_thermal_match),
                   },
 };
index a5d08c14ba24a79654fdd1445c90cbcc00db63b9..c5cbc3af3a0539260218492bc5aaa6199d8d5307 100644 (file)
@@ -231,7 +231,6 @@ static struct platform_driver int3402_thermal_driver = {
        .remove = int3402_thermal_remove,
        .driver = {
                   .name = "int3402 thermal",
-                  .owner = THIS_MODULE,
                   .acpi_match_table = int3402_thermal_match,
                   },
 };
index 1bfa6a69e77a15a8d021cb3ed33760b212ba9ce9..0faf500d8a77874d7c1b6c8a1b3e1195fc9e8065 100644 (file)
@@ -301,6 +301,8 @@ static int int3403_sensor_remove(struct int3403_priv *priv)
 {
        struct int3403_sensor *obj = priv->priv;
 
+       acpi_remove_notify_handler(priv->adev->handle,
+                                  ACPI_DEVICE_NOTIFY, int3403_notify);
        thermal_zone_device_unregister(obj->tzone);
        return 0;
 }
@@ -369,6 +371,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
        p = buf.pointer;
        if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
                printk(KERN_WARNING "Invalid PPSS data\n");
+               kfree(buf.pointer);
                return -EFAULT;
        }
 
@@ -381,6 +384,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
 
        priv->priv = obj;
 
+       kfree(buf.pointer);
        /* TODO: add ACPI notification support */
 
        return result;
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
new file mode 100644 (file)
index 0000000..31bb553
--- /dev/null
@@ -0,0 +1,309 @@
+/*
+ * processor_thermal_device.c
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+
+/* Broadwell-U/HSB thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603
+#define PCI_DEVICE_ID_PROC_HSB_THERMAL 0x0A03
+
+/* Braswell thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC
+
+struct power_config {
+       u32     index;
+       u32     min_uw;
+       u32     max_uw;
+       u32     tmin_us;
+       u32     tmax_us;
+       u32     step_uw;
+};
+
+struct proc_thermal_device {
+       struct device *dev;
+       struct acpi_device *adev;
+       struct power_config power_limits[2];
+};
+
+enum proc_thermal_emum_mode_type {
+       PROC_THERMAL_NONE,
+       PROC_THERMAL_PCI,
+       PROC_THERMAL_PLATFORM_DEV
+};
+
+/*
+ * We can have only one type of enumeration, PCI or Platform,
+ * not both. So we don't need instance specific data.
+ */
+static enum proc_thermal_emum_mode_type proc_thermal_emum_mode =
+                                                       PROC_THERMAL_NONE;
+
+#define POWER_LIMIT_SHOW(index, suffix) \
+static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
+                                       struct device_attribute *attr, \
+                                       char *buf) \
+{ \
+       struct pci_dev *pci_dev; \
+       struct platform_device *pdev; \
+       struct proc_thermal_device *proc_dev; \
+\
+       if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
+               pdev = to_platform_device(dev); \
+               proc_dev = platform_get_drvdata(pdev); \
+       } else { \
+               pci_dev = to_pci_dev(dev); \
+               proc_dev = pci_get_drvdata(pci_dev); \
+       } \
+       return sprintf(buf, "%lu\n",\
+       (unsigned long)proc_dev->power_limits[index].suffix * 1000); \
+}
+
+POWER_LIMIT_SHOW(0, min_uw)
+POWER_LIMIT_SHOW(0, max_uw)
+POWER_LIMIT_SHOW(0, step_uw)
+POWER_LIMIT_SHOW(0, tmin_us)
+POWER_LIMIT_SHOW(0, tmax_us)
+
+POWER_LIMIT_SHOW(1, min_uw)
+POWER_LIMIT_SHOW(1, max_uw)
+POWER_LIMIT_SHOW(1, step_uw)
+POWER_LIMIT_SHOW(1, tmin_us)
+POWER_LIMIT_SHOW(1, tmax_us)
+
+static DEVICE_ATTR_RO(power_limit_0_min_uw);
+static DEVICE_ATTR_RO(power_limit_0_max_uw);
+static DEVICE_ATTR_RO(power_limit_0_step_uw);
+static DEVICE_ATTR_RO(power_limit_0_tmin_us);
+static DEVICE_ATTR_RO(power_limit_0_tmax_us);
+
+static DEVICE_ATTR_RO(power_limit_1_min_uw);
+static DEVICE_ATTR_RO(power_limit_1_max_uw);
+static DEVICE_ATTR_RO(power_limit_1_step_uw);
+static DEVICE_ATTR_RO(power_limit_1_tmin_us);
+static DEVICE_ATTR_RO(power_limit_1_tmax_us);
+
+static struct attribute *power_limit_attrs[] = {
+       &dev_attr_power_limit_0_min_uw.attr,
+       &dev_attr_power_limit_1_min_uw.attr,
+       &dev_attr_power_limit_0_max_uw.attr,
+       &dev_attr_power_limit_1_max_uw.attr,
+       &dev_attr_power_limit_0_step_uw.attr,
+       &dev_attr_power_limit_1_step_uw.attr,
+       &dev_attr_power_limit_0_tmin_us.attr,
+       &dev_attr_power_limit_1_tmin_us.attr,
+       &dev_attr_power_limit_0_tmax_us.attr,
+       &dev_attr_power_limit_1_tmax_us.attr,
+       NULL
+};
+
+static struct attribute_group power_limit_attribute_group = {
+       .attrs = power_limit_attrs,
+       .name = "power_limits"
+};
+
+static int proc_thermal_add(struct device *dev,
+                           struct proc_thermal_device **priv)
+{
+       struct proc_thermal_device *proc_priv;
+       struct acpi_device *adev;
+       acpi_status status;
+       struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+       union acpi_object *elements, *ppcc;
+       union acpi_object *p;
+       int i;
+       int ret;
+
+       adev = ACPI_COMPANION(dev);
+
+       status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf);
+       if (ACPI_FAILURE(status))
+               return -ENODEV;
+
+       p = buf.pointer;
+       if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
+               dev_err(dev, "Invalid PPCC data\n");
+               ret = -EFAULT;
+               goto free_buffer;
+       }
+       if (!p->package.count) {
+               dev_err(dev, "Invalid PPCC package size\n");
+               ret = -EFAULT;
+               goto free_buffer;
+       }
+
+       proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL);
+       if (!proc_priv) {
+               ret = -ENOMEM;
+               goto free_buffer;
+       }
+
+       proc_priv->dev = dev;
+       proc_priv->adev = adev;
+
+       for (i = 0; i < min((int)p->package.count - 1, 2); ++i) {
+               elements = &(p->package.elements[i+1]);
+               if (elements->type != ACPI_TYPE_PACKAGE ||
+                   elements->package.count != 6) {
+                       ret = -EFAULT;
+                       goto free_buffer;
+               }
+               ppcc = elements->package.elements;
+               proc_priv->power_limits[i].index = ppcc[0].integer.value;
+               proc_priv->power_limits[i].min_uw = ppcc[1].integer.value;
+               proc_priv->power_limits[i].max_uw = ppcc[2].integer.value;
+               proc_priv->power_limits[i].tmin_us = ppcc[3].integer.value;
+               proc_priv->power_limits[i].tmax_us = ppcc[4].integer.value;
+               proc_priv->power_limits[i].step_uw = ppcc[5].integer.value;
+       }
+
+       *priv = proc_priv;
+
+       ret = sysfs_create_group(&dev->kobj,
+                                &power_limit_attribute_group);
+
+free_buffer:
+       kfree(buf.pointer);
+
+       return ret;
+}
+
+void proc_thermal_remove(struct proc_thermal_device *proc_priv)
+{
+       sysfs_remove_group(&proc_priv->dev->kobj,
+                          &power_limit_attribute_group);
+}
+
+static int int3401_add(struct platform_device *pdev)
+{
+       struct proc_thermal_device *proc_priv;
+       int ret;
+
+       if (proc_thermal_emum_mode == PROC_THERMAL_PCI) {
+               dev_err(&pdev->dev, "error: enumerated as PCI dev\n");
+               return -ENODEV;
+       }
+
+       ret = proc_thermal_add(&pdev->dev, &proc_priv);
+       if (ret)
+               return ret;
+
+       platform_set_drvdata(pdev, proc_priv);
+       proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
+
+       return 0;
+}
+
+static int int3401_remove(struct platform_device *pdev)
+{
+       proc_thermal_remove(platform_get_drvdata(pdev));
+
+       return 0;
+}
+
+static int  proc_thermal_pci_probe(struct pci_dev *pdev,
+                                  const struct pci_device_id *unused)
+{
+       struct proc_thermal_device *proc_priv;
+       int ret;
+
+       if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) {
+               dev_err(&pdev->dev, "error: enumerated as platform dev\n");
+               return -ENODEV;
+       }
+
+       ret = pci_enable_device(pdev);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "error: could not enable device\n");
+               return ret;
+       }
+
+       ret = proc_thermal_add(&pdev->dev, &proc_priv);
+       if (ret) {
+               pci_disable_device(pdev);
+               return ret;
+       }
+
+       pci_set_drvdata(pdev, proc_priv);
+       proc_thermal_emum_mode = PROC_THERMAL_PCI;
+
+       return 0;
+}
+
+static void  proc_thermal_pci_remove(struct pci_dev *pdev)
+{
+       proc_thermal_remove(pci_get_drvdata(pdev));
+       pci_disable_device(pdev);
+}
+
+static const struct pci_device_id proc_thermal_pci_ids[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)},
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)},
+       { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)},
+       { 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids);
+
+static struct pci_driver proc_thermal_pci_driver = {
+       .name           = "proc_thermal",
+       .probe          = proc_thermal_pci_probe,
+       .remove         = proc_thermal_pci_remove,
+       .id_table       = proc_thermal_pci_ids,
+};
+
+static const struct acpi_device_id int3401_device_ids[] = {
+       {"INT3401", 0},
+       {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, int3401_device_ids);
+
+static struct platform_driver int3401_driver = {
+       .probe = int3401_add,
+       .remove = int3401_remove,
+       .driver = {
+               .name = "int3401 thermal",
+               .acpi_match_table = int3401_device_ids,
+       },
+};
+
+static int __init proc_thermal_init(void)
+{
+       int ret;
+
+       ret = platform_driver_register(&int3401_driver);
+       if (ret)
+               return ret;
+
+       ret = pci_register_driver(&proc_thermal_pci_driver);
+
+       return ret;
+}
+
+static void __exit proc_thermal_exit(void)
+{
+       platform_driver_unregister(&int3401_driver);
+       pci_unregister_driver(&proc_thermal_pci_driver);
+}
+
+module_init(proc_thermal_init);
+module_exit(proc_thermal_exit);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
+MODULE_LICENSE("GPL v2");
index b46c706e1cacf1027657fda668dfdd4057f6319e..6ceebd659dd400423c0640b1d0911da36b441b74 100644 (file)
@@ -435,7 +435,6 @@ static int clamp_thread(void *arg)
                 * allowed. thus jiffies are updated properly.
                 */
                preempt_disable();
-               tick_nohz_idle_enter();
                /* mwait until target jiffies is reached */
                while (time_before(jiffies, target_jiffies)) {
                        unsigned long ecx = 1;
@@ -451,7 +450,6 @@ static int clamp_thread(void *arg)
                        start_critical_timings();
                        atomic_inc(&idle_wakeup_counter);
                }
-               tick_nohz_idle_exit();
                preempt_enable();
        }
        del_timer_sync(&wakeup_timer);
@@ -690,6 +688,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
        { X86_VENDOR_INTEL, 6, 0x45},
        { X86_VENDOR_INTEL, 6, 0x46},
        { X86_VENDOR_INTEL, 6, 0x4c},
+       { X86_VENDOR_INTEL, 6, 0x56},
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
index 1bcddfc60e915e9e6f97594671076330af4adb5a..9c6ce548e36312f95ca49f6352cf4999a1ab0fe0 100644 (file)
@@ -677,7 +677,6 @@ static SIMPLE_DEV_PM_OPS(rockchip_thermal_pm_ops,
 static struct platform_driver rockchip_thermal_driver = {
        .driver = {
                .name = "rockchip-thermal",
-               .owner = THIS_MODULE,
                .pm = &rockchip_thermal_pm_ops,
                .of_match_table = of_rockchip_thermal_match,
        },
index f760389a204c673738e933c91ace4e742b735811..c43306ecc0abbb111dc4c6bfdda5201b64ae1738 100644 (file)
@@ -1,6 +1,6 @@
 config EXYNOS_THERMAL
        tristate "Exynos thermal management unit driver"
-       depends on ARCH_HAS_BANDGAP && OF
+       depends on OF
        help
          If you say yes here you get support for the TMU (Thermal Management
          Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
index b6be572704a4c7ff97055f1cb273ff3016399469..6dc3815cc73f514c71d16fab40552609fa12ba6d 100644 (file)
@@ -347,7 +347,6 @@ void exynos_report_trigger(struct thermal_sensor_conf *conf)
 int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
 {
        int ret;
-       struct cpumask mask_val;
        struct exynos_thermal_zone *th_zone;
 
        if (!sensor_conf || !sensor_conf->read_temperature) {
@@ -367,13 +366,14 @@ int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
         *       sensor
         */
        if (sensor_conf->cooling_data.freq_clip_count > 0) {
-               cpumask_set_cpu(0, &mask_val);
                th_zone->cool_dev[th_zone->cool_dev_size] =
-                                       cpufreq_cooling_register(&mask_val);
+                               cpufreq_cooling_register(cpu_present_mask);
                if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) {
-                       dev_err(sensor_conf->dev,
-                               "Failed to register cpufreq cooling device\n");
-                       ret = -EINVAL;
+                       ret = PTR_ERR(th_zone->cool_dev[th_zone->cool_dev_size]);
+                       if (ret != -EPROBE_DEFER)
+                               dev_err(sensor_conf->dev,
+                                       "Failed to register cpufreq cooling device: %d\n",
+                                       ret);
                        goto err_unregister;
                }
                th_zone->cool_dev_size++;
index d44d91d681d4333055526c28ff461e73ed709d0c..d2f1e62a42328095a35efb25ca461875e9f87c9f 100644 (file)
@@ -927,7 +927,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
        /* Register the sensor with thermal management interface */
        ret = exynos_register_thermal(sensor_conf);
        if (ret) {
-               dev_err(&pdev->dev, "Failed to register thermal interface\n");
+               if (ret != -EPROBE_DEFER)
+                       dev_err(&pdev->dev,
+                               "Failed to register thermal interface: %d\n",
+                               ret);
                goto err_clk;
        }
        data->reg_conf = sensor_conf;
index 84fdf0792e27cf57c979288a6a9b79543628495f..87e0b0782023cb37696a92150d9f0c10bd09b198 100644 (file)
@@ -930,7 +930,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
        struct thermal_zone_device *pos1;
        struct thermal_cooling_device *pos2;
        unsigned long max_state;
-       int result;
+       int result, ret;
 
        if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE))
                return -EINVAL;
@@ -947,7 +947,9 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
        if (tz != pos1 || cdev != pos2)
                return -EINVAL;
 
-       cdev->ops->get_max_state(cdev, &max_state);
+       ret = cdev->ops->get_max_state(cdev, &max_state);
+       if (ret)
+               return ret;
 
        /* lower default 0, upper default max_state */
        lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
index 5fd03865e396e373d20e3c8e2dd54c79a26955ee..3fb054a10f6a0fde450e29a98ee4cdf90e18f6c7 100644 (file)
@@ -28,7 +28,6 @@
 #include <linux/kernel.h>
 #include <linux/workqueue.h>
 #include <linux/thermal.h>
-#include <linux/cpufreq.h>
 #include <linux/cpumask.h>
 #include <linux/cpu_cooling.h>
 #include <linux/of.h>
@@ -407,17 +406,17 @@ int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id)
        if (!data)
                return -EINVAL;
 
-       if (!cpufreq_get_current_driver()) {
-               dev_dbg(bgp->dev, "no cpufreq driver yet\n");
-               return -EPROBE_DEFER;
-       }
-
        /* Register cooling device */
        data->cool_dev = cpufreq_cooling_register(cpu_present_mask);
        if (IS_ERR(data->cool_dev)) {
-               dev_err(bgp->dev,
-                       "Failed to register cpufreq cooling device\n");
-               return PTR_ERR(data->cool_dev);
+               int ret = PTR_ERR(data->cool_dev);
+
+               if (ret != -EPROBE_DEFER)
+                       dev_err(bgp->dev,
+                               "Failed to register cpu cooling device %d\n",
+                               ret);
+
+               return ret;
        }
        ti_bandgap_set_sensor_data(bgp, id, data);
 
index 336602eb453eb7e419f89168958153c7a9477d85..96b69bfd773f025f6e86cff1cbfb1e2566c427ce 100644 (file)
@@ -561,7 +561,7 @@ static int omap_8250_startup(struct uart_port *port)
        if (ret)
                goto err;
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
        up->capabilities |= UART_CAP_RPM;
 #endif
 
@@ -997,12 +997,12 @@ static int omap8250_probe(struct platform_device *pdev)
        up.port.fifosize = 64;
        up.tx_loadsz = 64;
        up.capabilities = UART_CAP_FIFO;
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
        /*
-        * PM_RUNTIME is mostly transparent. However to do it right we need to a
+        * Runtime PM is mostly transparent. However to do it right we need to a
         * TX empty interrupt before we can put the device to auto idle. So if
-        * PM_RUNTIME is not enabled we don't add that flag and can spare that
-        * one extra interrupt in the TX path.
+        * PM is not enabled we don't add that flag and can spare that one extra
+        * interrupt in the TX path.
         */
        up.capabilities |= UART_CAP_RPM;
 #endif
@@ -1105,7 +1105,7 @@ static int omap8250_remove(struct platform_device *pdev)
        return 0;
 }
 
-#if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME)
+#ifdef CONFIG_PM
 
 static inline void omap8250_enable_wakeirq(struct omap8250_priv *priv,
                                           bool enable)
@@ -1179,7 +1179,7 @@ static int omap8250_resume(struct device *dev)
 #define omap8250_complete NULL
 #endif
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 static int omap8250_lost_context(struct uart_8250_port *up)
 {
        u32 val;
index 024f58475a94a953a2c1171a5efdb5b8eb201fbd..3a494168661e40c9f20e812bc1473620711aebb5 100644 (file)
@@ -1131,19 +1131,19 @@ static int usbg_submit_command(struct f_uas *fu,
 
        switch (cmd_iu->prio_attr & 0x7) {
        case UAS_HEAD_TAG:
-               cmd->prio_attr = MSG_HEAD_TAG;
+               cmd->prio_attr = TCM_HEAD_TAG;
                break;
        case UAS_ORDERED_TAG:
-               cmd->prio_attr = MSG_ORDERED_TAG;
+               cmd->prio_attr = TCM_ORDERED_TAG;
                break;
        case UAS_ACA:
-               cmd->prio_attr = MSG_ACA_TAG;
+               cmd->prio_attr = TCM_ACA_TAG;
                break;
        default:
                pr_debug_once("Unsupported prio_attr: %02x.\n",
                                cmd_iu->prio_attr);
        case UAS_SIMPLE_TAG:
-               cmd->prio_attr = MSG_SIMPLE_TAG;
+               cmd->prio_attr = TCM_SIMPLE_TAG;
                break;
        }
 
@@ -1240,7 +1240,7 @@ static int bot_submit_command(struct f_uas *fu,
                goto err;
        }
 
-       cmd->prio_attr = MSG_SIMPLE_TAG;
+       cmd->prio_attr = TCM_SIMPLE_TAG;
        se_cmd = &cmd->se_cmd;
        cmd->unpacked_lun = cbw->Lun;
        cmd->is_read = cbw->Flags & US_BULK_FLAG_IN ? 1 : 0;
index e752c3098f3803823ea2efa26ad27e7c81b42e7f..395649f357aa3b2103493a2602346dc9a3062125 100644 (file)
@@ -1739,7 +1739,7 @@ static int isp1760_hub_status_data(struct usb_hcd *hcd, char *buf)
        int retval = 1;
        unsigned long flags;
 
-       /* if !PM_RUNTIME, root hub timers won't get shut down ... */
+       /* if !PM, root hub timers won't get shut down ... */
        if (!HC_IS_RUNNING(hcd->state))
                return 0;
 
index 75811dd5a9d7a50f108decad2540171d08fb1741..036924e640f55fe6472ababc2aa54d5240dbb29e 100644 (file)
@@ -3087,7 +3087,7 @@ static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
        int ports, i, retval = 1;
        unsigned long flags;
 
-       /* if !PM_RUNTIME, root hub timers won't get shut down ... */
+       /* if !PM, root hub timers won't get shut down ... */
        if (!HC_IS_RUNNING(hcd->state))
                return 0;
 
index 50610a6acf3d313aec4c106fb6ae3837bdd31ca5..e999496eda3efa65c3cda54fcc287212313ee60b 100644 (file)
@@ -606,7 +606,7 @@ static void scsiback_device_action(struct vscsibk_pend *pending_req,
        init_waitqueue_head(&tmr->tmr_wait);
 
        transport_init_se_cmd(se_cmd, tpg->se_tpg.se_tpg_tfo,
-               tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, MSG_SIMPLE_TAG,
+               tpg->tpg_nexus->tvn_se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG,
                &pending_req->sense_buffer[0]);
 
        rc = core_tmr_alloc_req(se_cmd, tmr, act, GFP_KERNEL);
index c04ef1d4f18a573726f83d7f5a1401f06ec9652b..97aff2879cda3c1f9295d6e480a4e737e48e3df2 100644 (file)
@@ -254,6 +254,7 @@ static char *scanarg(char *s, char del)
                                return NULL;
                }
        }
+       s[-1] ='\0';
        return s;
 }
 
@@ -378,8 +379,7 @@ static Node *create_entry(const char __user *buffer, size_t count)
                p = scanarg(p, del);
                if (!p)
                        goto einval;
-               p[-1] = '\0';
-               if (p == e->magic)
+               if (!e->magic[0])
                        goto einval;
                if (USE_DEBUG)
                        print_hex_dump_bytes(
@@ -391,8 +391,7 @@ static Node *create_entry(const char __user *buffer, size_t count)
                p = scanarg(p, del);
                if (!p)
                        goto einval;
-               p[-1] = '\0';
-               if (p == e->mask) {
+               if (!e->mask[0]) {
                        e->mask = NULL;
                        pr_debug("register:  mask[raw]: none\n");
                } else if (USE_DEBUG)
index e6fbbd74b716ae69da265779af5317c0057f1a18..7e607416755a880fef1a06d3a8a3482417c0b364 100644 (file)
@@ -3481,8 +3481,8 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info);
 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo);
 int btrfs_error_unpin_extent_range(struct btrfs_root *root,
                                   u64 start, u64 end);
-int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
-                              u64 num_bytes, u64 *actual_bytes);
+int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
+                        u64 num_bytes, u64 *actual_bytes);
 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root, u64 type);
 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range);
index 30965120772bd2d814a97d9b4758f1bc0b8cfe6c..8c63419a7f70de04d2e5fd1a51a3a0e334513918 100644 (file)
@@ -4121,12 +4121,6 @@ again:
                if (ret)
                        break;
 
-               /* opt_discard */
-               if (btrfs_test_opt(root, DISCARD))
-                       ret = btrfs_error_discard_extent(root, start,
-                                                        end + 1 - start,
-                                                        NULL);
-
                clear_extent_dirty(unpin, start, end, GFP_NOFS);
                btrfs_error_unpin_extent_range(root, start, end);
                cond_resched();
index 222d6aea4a8a778cff8386873d8cb51ed5f86b16..a80b97100d90b3162d7d3688ed6b3c459bb56bc8 100644 (file)
@@ -1889,8 +1889,8 @@ static int btrfs_issue_discard(struct block_device *bdev,
        return blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_NOFS, 0);
 }
 
-static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
-                               u64 num_bytes, u64 *actual_bytes)
+int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
+                        u64 num_bytes, u64 *actual_bytes)
 {
        int ret;
        u64 discarded_bytes = 0;
@@ -5727,7 +5727,8 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
        update_global_block_rsv(fs_info);
 }
 
-static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
+static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
+                             const bool return_free_space)
 {
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_block_group_cache *cache = NULL;
@@ -5751,7 +5752,8 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
 
                if (start < cache->last_byte_to_unpin) {
                        len = min(len, cache->last_byte_to_unpin - start);
-                       btrfs_add_free_space(cache, start, len);
+                       if (return_free_space)
+                               btrfs_add_free_space(cache, start, len);
                }
 
                start += len;
@@ -5815,7 +5817,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
                                                   end + 1 - start, NULL);
 
                clear_extent_dirty(unpin, start, end, GFP_NOFS);
-               unpin_extent_range(root, start, end);
+               unpin_extent_range(root, start, end, true);
                cond_resched();
        }
 
@@ -8872,6 +8874,7 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
                                       cache_node);
                rb_erase(&block_group->cache_node,
                         &info->block_group_cache_tree);
+               RB_CLEAR_NODE(&block_group->cache_node);
                spin_unlock(&info->block_group_cache_lock);
 
                down_write(&block_group->space_info->groups_sem);
@@ -9130,6 +9133,7 @@ int btrfs_read_block_groups(struct btrfs_root *root)
                        spin_lock(&info->block_group_cache_lock);
                        rb_erase(&cache->cache_node,
                                 &info->block_group_cache_tree);
+                       RB_CLEAR_NODE(&cache->cache_node);
                        spin_unlock(&info->block_group_cache_lock);
                        btrfs_put_block_group(cache);
                        goto error;
@@ -9271,6 +9275,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
                spin_lock(&root->fs_info->block_group_cache_lock);
                rb_erase(&cache->cache_node,
                         &root->fs_info->block_group_cache_tree);
+               RB_CLEAR_NODE(&cache->cache_node);
                spin_unlock(&root->fs_info->block_group_cache_lock);
                btrfs_put_block_group(cache);
                return ret;
@@ -9690,13 +9695,7 @@ out:
 
 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
 {
-       return unpin_extent_range(root, start, end);
-}
-
-int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
-                              u64 num_bytes, u64 *actual_bytes)
-{
-       return btrfs_discard_extent(root, bytenr, num_bytes, actual_bytes);
+       return unpin_extent_range(root, start, end, false);
 }
 
 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
index 030847bf7cec803e36485e0999b7309d941877ef..d6c03f7f136b359c534668a38f9e9a72d299eb66 100644 (file)
@@ -2966,8 +2966,8 @@ static int do_trimming(struct btrfs_block_group_cache *block_group,
        spin_unlock(&block_group->lock);
        spin_unlock(&space_info->lock);
 
-       ret = btrfs_error_discard_extent(fs_info->extent_root,
-                                        start, bytes, &trimmed);
+       ret = btrfs_discard_extent(fs_info->extent_root,
+                                  start, bytes, &trimmed);
        if (!ret)
                *total_trimmed += trimmed;
 
@@ -3185,16 +3185,18 @@ out:
 
                spin_unlock(&block_group->lock);
 
+               lock_chunks(block_group->fs_info->chunk_root);
                em_tree = &block_group->fs_info->mapping_tree.map_tree;
                write_lock(&em_tree->lock);
                em = lookup_extent_mapping(em_tree, block_group->key.objectid,
                                           1);
                BUG_ON(!em); /* logic error, can't happen */
+               /*
+                * remove_extent_mapping() will delete us from the pinned_chunks
+                * list, which is protected by the chunk mutex.
+                */
                remove_extent_mapping(em_tree, em);
                write_unlock(&em_tree->lock);
-
-               lock_chunks(block_group->fs_info->chunk_root);
-               list_del_init(&em->list);
                unlock_chunks(block_group->fs_info->chunk_root);
 
                /* once for us and once for the tree */
index 0144790e296ed90ac0d17fef5ec382bf36ea7347..50c5a8762aedfc7bf5be640b96b3eb4622b58f88 100644 (file)
@@ -1485,7 +1485,7 @@ static void update_dev_time(char *path_name)
        struct file *filp;
 
        filp = filp_open(path_name, O_RDWR, 0);
-       if (!filp)
+       if (IS_ERR(filp))
                return;
        file_update_time(filp);
        filp_close(filp, NULL);
index 6e139111fdb250cc85f28d96d7d26fe0508850a4..22b289a3b1c4d3e12727cc0a005456fa9b295a00 100644 (file)
@@ -661,16 +661,16 @@ set_credits(struct TCP_Server_Info *server, const int val)
        server->ops->set_credits(server, val);
 }
 
-static inline __u64
+static inline __le64
 get_next_mid64(struct TCP_Server_Info *server)
 {
-       return server->ops->get_next_mid(server);
+       return cpu_to_le64(server->ops->get_next_mid(server));
 }
 
 static inline __le16
 get_next_mid(struct TCP_Server_Info *server)
 {
-       __u16 mid = get_next_mid64(server);
+       __u16 mid = server->ops->get_next_mid(server);
        /*
         * The value in the SMB header should be little endian for easy
         * on-the-wire decoding.
index b333ff60781d295809d8fa8f23366f9bcf8d9285..abae6dd2c6b998816db830f40934c86ea8c33fff 100644 (file)
@@ -926,6 +926,7 @@ cifs_NTtimeToUnix(__le64 ntutc)
 
        /* Subtract the NTFS time offset, then convert to 1s intervals. */
        s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
+       u64 abs_t;
 
        /*
         * Unfortunately can not use normal 64 bit division on 32 bit arch, but
@@ -933,13 +934,14 @@ cifs_NTtimeToUnix(__le64 ntutc)
         * to special case them
         */
        if (t < 0) {
-               t = -t;
-               ts.tv_nsec = (long)(do_div(t, 10000000) * 100);
+               abs_t = -t;
+               ts.tv_nsec = (long)(do_div(abs_t, 10000000) * 100);
                ts.tv_nsec = -ts.tv_nsec;
-               ts.tv_sec = -t;
+               ts.tv_sec = -abs_t;
        } else {
-               ts.tv_nsec = (long)do_div(t, 10000000) * 100;
-               ts.tv_sec = t;
+               abs_t = t;
+               ts.tv_nsec = (long)do_div(abs_t, 10000000) * 100;
+               ts.tv_sec = abs_t;
        }
 
        return ts;
index 8eaf20a806494c71002a668a3e49b159b1b66d71..c295338e0a98ce95a71c60afedc87ad9bd5267b5 100644 (file)
@@ -69,7 +69,8 @@ static inline void dump_cifs_file_struct(struct file *file, char *label)
  * Attempt to preload the dcache with the results from the FIND_FIRST/NEXT
  *
  * Find the dentry that matches "name". If there isn't one, create one. If it's
- * a negative dentry or the uniqueid changed, then drop it and recreate it.
+ * a negative dentry or the uniqueid or filetype(mode) changed,
+ * then drop it and recreate it.
  */
 static void
 cifs_prime_dcache(struct dentry *parent, struct qstr *name,
@@ -97,8 +98,11 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
                        if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM))
                                fattr->cf_uniqueid = CIFS_I(inode)->uniqueid;
 
-                       /* update inode in place if i_ino didn't change */
-                       if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+                       /* update inode in place
+                        * if both i_ino and i_mode didn't change */
+                       if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid &&
+                           (inode->i_mode & S_IFMT) ==
+                           (fattr->cf_mode & S_IFMT)) {
                                cifs_fattr_to_inode(inode, fattr);
                                goto out;
                        }
index f1cefc9763edaeb3115ee1868d9bc4f033b7e0f5..689f035915cf70f075d71fca5e281ec009c5420a 100644 (file)
 static int
 check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
 {
+       __u64 wire_mid = le64_to_cpu(hdr->MessageId);
+
        /*
         * Make sure that this really is an SMB, that it is a response,
         * and that the message ids match.
         */
        if ((*(__le32 *)hdr->ProtocolId == SMB2_PROTO_NUMBER) &&
-           (mid == hdr->MessageId)) {
+           (mid == wire_mid)) {
                if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
                        return 0;
                else {
@@ -51,11 +53,11 @@ check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
                if (*(__le32 *)hdr->ProtocolId != SMB2_PROTO_NUMBER)
                        cifs_dbg(VFS, "Bad protocol string signature header %x\n",
                                 *(unsigned int *) hdr->ProtocolId);
-               if (mid != hdr->MessageId)
+               if (mid != wire_mid)
                        cifs_dbg(VFS, "Mids do not match: %llu and %llu\n",
-                                mid, hdr->MessageId);
+                                mid, wire_mid);
        }
-       cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", hdr->MessageId);
+       cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", wire_mid);
        return 1;
 }
 
@@ -95,7 +97,7 @@ smb2_check_message(char *buf, unsigned int length)
 {
        struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
        struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
-       __u64 mid = hdr->MessageId;
+       __u64 mid = le64_to_cpu(hdr->MessageId);
        __u32 len = get_rfc1002_length(buf);
        __u32 clc_len;  /* calculated length */
        int command;
index 93fd0586f9ec6e661c17de59cb66535d80e7ff51..96b5d40a2ece611b27ed19668cc4b7b665605113 100644 (file)
@@ -176,10 +176,11 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
 {
        struct mid_q_entry *mid;
        struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
+       __u64 wire_mid = le64_to_cpu(hdr->MessageId);
 
        spin_lock(&GlobalMid_Lock);
        list_for_each_entry(mid, &server->pending_mid_q, qhead) {
-               if ((mid->mid == hdr->MessageId) &&
+               if ((mid->mid == wire_mid) &&
                    (mid->mid_state == MID_REQUEST_SUBMITTED) &&
                    (mid->command == hdr->Command)) {
                        spin_unlock(&GlobalMid_Lock);
index ce858477002a6148e31a85e9f28fc012968f52c9..70867d54fb8bf485cb5ff4dcb3049f67ca86cb45 100644 (file)
@@ -110,7 +110,7 @@ struct smb2_hdr {
        __le16 CreditRequest;  /* CreditResponse */
        __le32 Flags;
        __le32 NextCommand;
-       __u64  MessageId;       /* opaque - so can stay little endian */
+       __le64 MessageId;
        __le32 ProcessId;
        __u32  TreeId;          /* opaque - so do not make little endian */
        __u64  SessionId;       /* opaque - so do not make little endian */
index 5111e7272db62e718fcb3d968af48083d598bccf..d4c5b6f109a7feaa6f2c99f21ca332ff41a2673f 100644 (file)
@@ -490,7 +490,7 @@ smb2_mid_entry_alloc(const struct smb2_hdr *smb_buffer,
                return temp;
        else {
                memset(temp, 0, sizeof(struct mid_q_entry));
-               temp->mid = smb_buffer->MessageId;      /* always LE */
+               temp->mid = le64_to_cpu(smb_buffer->MessageId);
                temp->pid = current->pid;
                temp->command = smb_buffer->Command;    /* Always LE */
                temp->when_alloc = jiffies;
index c2d6604667b052bf343febc88969f60f57568290..719e1ce1c60930ec8e2c316631d21ec2bac03cc3 100644 (file)
@@ -1917,7 +1917,6 @@ ecryptfs_decode_from_filename(unsigned char *dst, size_t *dst_size,
                        break;
                case 2:
                        dst[dst_byte_offset++] |= (src_byte);
-                       dst[dst_byte_offset] = 0;
                        current_bit_offset = 0;
                        break;
                }
index 80154ec4f8c20118a06d6e38a806180379b52abd..6f4e659f508f303bdadcc8922b2cc1a1397bb51b 100644 (file)
@@ -190,23 +190,11 @@ static int ecryptfs_open(struct inode *inode, struct file *file)
 {
        int rc = 0;
        struct ecryptfs_crypt_stat *crypt_stat = NULL;
-       struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
        struct dentry *ecryptfs_dentry = file->f_path.dentry;
        /* Private value of ecryptfs_dentry allocated in
         * ecryptfs_lookup() */
        struct ecryptfs_file_info *file_info;
 
-       mount_crypt_stat = &ecryptfs_superblock_to_private(
-               ecryptfs_dentry->d_sb)->mount_crypt_stat;
-       if ((mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
-           && ((file->f_flags & O_WRONLY) || (file->f_flags & O_RDWR)
-               || (file->f_flags & O_CREAT) || (file->f_flags & O_TRUNC)
-               || (file->f_flags & O_APPEND))) {
-               printk(KERN_WARNING "Mount has encrypted view enabled; "
-                      "files may only be read\n");
-               rc = -EPERM;
-               goto out;
-       }
        /* Released in ecryptfs_release or end of function if failure */
        file_info = kmem_cache_zalloc(ecryptfs_file_info_cache, GFP_KERNEL);
        ecryptfs_set_file_private(file, file_info);
index 635e8e16a5b7692b1d45b7629d404beeafdb8567..917bd5c9776aabcff5b482f59bd8d316193ead35 100644 (file)
@@ -100,12 +100,12 @@ int ecryptfs_parse_packet_length(unsigned char *data, size_t *size,
        (*size) = 0;
        if (data[0] < 192) {
                /* One-byte length */
-               (*size) = (unsigned char)data[0];
+               (*size) = data[0];
                (*length_size) = 1;
        } else if (data[0] < 224) {
                /* Two-byte length */
-               (*size) = (((unsigned char)(data[0]) - 192) * 256);
-               (*size) += ((unsigned char)(data[1]) + 192);
+               (*size) = (data[0] - 192) * 256;
+               (*size) += data[1] + 192;
                (*length_size) = 2;
        } else if (data[0] == 255) {
                /* If support is added, adjust ECRYPTFS_MAX_PKT_LEN_SIZE */
index c4cd1fd86cc2ffd4a09beddd3aca3f25d1fdb06f..d9eb84bda5591a36c68c6519faf591daf7a05efa 100644 (file)
@@ -493,6 +493,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
 {
        struct super_block *s;
        struct ecryptfs_sb_info *sbi;
+       struct ecryptfs_mount_crypt_stat *mount_crypt_stat;
        struct ecryptfs_dentry_info *root_info;
        const char *err = "Getting sb failed";
        struct inode *inode;
@@ -511,6 +512,7 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
                err = "Error parsing options";
                goto out;
        }
+       mount_crypt_stat = &sbi->mount_crypt_stat;
 
        s = sget(fs_type, NULL, set_anon_super, flags, NULL);
        if (IS_ERR(s)) {
@@ -557,11 +559,19 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
 
        /**
         * Set the POSIX ACL flag based on whether they're enabled in the lower
-        * mount. Force a read-only eCryptfs mount if the lower mount is ro.
-        * Allow a ro eCryptfs mount even when the lower mount is rw.
+        * mount.
         */
        s->s_flags = flags & ~MS_POSIXACL;
-       s->s_flags |= path.dentry->d_sb->s_flags & (MS_RDONLY | MS_POSIXACL);
+       s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL;
+
+       /**
+        * Force a read-only eCryptfs mount when:
+        *   1) The lower mount is ro
+        *   2) The ecryptfs_encrypted_view mount option is specified
+        */
+       if (path.dentry->d_sb->s_flags & MS_RDONLY ||
+           mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
+               s->s_flags |= MS_RDONLY;
 
        s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
        s->s_blocksize = path.dentry->d_sb->s_blocksize;
index 503ea15dc5dbe07b5ffe5ea891ea2b10a287012a..370420bfae8d76aaf7f68d21cfb5f1c06cf7770c 100644 (file)
@@ -267,7 +267,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
        handle_t *handle;
        ext4_lblk_t orig_blk_offset, donor_blk_offset;
        unsigned long blocksize = orig_inode->i_sb->s_blocksize;
-       unsigned int w_flags = 0;
        unsigned int tmp_data_size, data_size, replaced_size;
        int err2, jblocks, retries = 0;
        int replaced_count = 0;
@@ -288,9 +287,6 @@ again:
                return 0;
        }
 
-       if (segment_eq(get_fs(), KERNEL_DS))
-               w_flags |= AOP_FLAG_UNINTERRUPTIBLE;
-
        orig_blk_offset = orig_page_offset * blocks_per_page +
                data_offset_in_page;
 
index bb63254ed8486f42200230b4bbaa80257f92700d..735d7522a3a911f19af593d6b5f7d366d6cf448d 100644 (file)
@@ -362,6 +362,9 @@ repeat:
                        rs.cont_size = isonum_733(rr->u.CE.size);
                        break;
                case SIG('E', 'R'):
+                       /* Invalid length of ER tag id? */
+                       if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
+                               goto out;
                        ISOFS_SB(inode->i_sb)->s_rock = 1;
                        printk(KERN_DEBUG "ISO 9660 Extensions: ");
                        {
index 697390ea47b8fe20c301526bbeaddb505b7ab820..ddc9f9612f168f026ea71aa4f4aac41bf6860369 100644 (file)
@@ -448,27 +448,6 @@ static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
        return pol;
 }
 
-static int kernfs_vma_migrate(struct vm_area_struct *vma,
-                             const nodemask_t *from, const nodemask_t *to,
-                             unsigned long flags)
-{
-       struct file *file = vma->vm_file;
-       struct kernfs_open_file *of = kernfs_of(file);
-       int ret;
-
-       if (!of->vm_ops)
-               return 0;
-
-       if (!kernfs_get_active(of->kn))
-               return 0;
-
-       ret = 0;
-       if (of->vm_ops->migrate)
-               ret = of->vm_ops->migrate(vma, from, to, flags);
-
-       kernfs_put_active(of->kn);
-       return ret;
-}
 #endif
 
 static const struct vm_operations_struct kernfs_vm_ops = {
@@ -479,7 +458,6 @@ static const struct vm_operations_struct kernfs_vm_ops = {
 #ifdef CONFIG_NUMA
        .set_policy     = kernfs_vma_set_policy,
        .get_policy     = kernfs_vma_get_policy,
-       .migrate        = kernfs_vma_migrate,
 #endif
 };
 
index bf2d03f8fd3e1ae6fec98e5bd7f81bb1410bf6dd..510413eb25b8bbff35853232c5b72f0881eaf78c 100644 (file)
@@ -159,7 +159,7 @@ static int show_stat(struct seq_file *p, void *v)
 
        /* sum again ? it could be updated? */
        for_each_irq_nr(j)
-               seq_put_decimal_ull(p, ' ', kstat_irqs(j));
+               seq_put_decimal_ull(p, ' ', kstat_irqs_usr(j));
 
        seq_printf(p,
                "\nctxt %llu\n"
index 73ca1740d839513468ced1b589bf5e469f391689..0f96f71ab32bab79baa01a2b473b54417512a8bc 100644 (file)
@@ -91,6 +91,7 @@ static void show_type(struct seq_file *m, struct super_block *sb)
 
 static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt)
 {
+       struct proc_mounts *p = proc_mounts(m);
        struct mount *r = real_mount(mnt);
        int err = 0;
        struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
@@ -104,7 +105,10 @@ static int show_vfsmnt(struct seq_file *m, struct vfsmount *mnt)
                mangle(m, r->mnt_devname ? r->mnt_devname : "none");
        }
        seq_putc(m, ' ');
-       seq_path(m, &mnt_path, " \t\n\\");
+       /* mountpoints outside of chroot jail will give SEQ_SKIP on this */
+       err = seq_path_root(m, &mnt_path, &p->root, " \t\n\\");
+       if (err)
+               goto out;
        seq_putc(m, ' ');
        show_type(m, sb);
        seq_puts(m, __mnt_is_readonly(mnt) ? " ro" : " rw");
@@ -125,7 +129,6 @@ static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
        struct mount *r = real_mount(mnt);
        struct super_block *sb = mnt->mnt_sb;
        struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
-       struct path root = p->root;
        int err = 0;
 
        seq_printf(m, "%i %i %u:%u ", r->mnt_id, r->mnt_parent->mnt_id,
@@ -139,7 +142,7 @@ static int show_mountinfo(struct seq_file *m, struct vfsmount *mnt)
        seq_putc(m, ' ');
 
        /* mountpoints outside of chroot jail will give SEQ_SKIP on this */
-       err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
+       err = seq_path_root(m, &mnt_path, &p->root, " \t\n\\");
        if (err)
                goto out;
 
@@ -182,6 +185,7 @@ out:
 
 static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
 {
+       struct proc_mounts *p = proc_mounts(m);
        struct mount *r = real_mount(mnt);
        struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
        struct super_block *sb = mnt_path.dentry->d_sb;
@@ -201,7 +205,10 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
 
        /* mount point */
        seq_puts(m, " mounted on ");
-       seq_path(m, &mnt_path, " \t\n\\");
+       /* mountpoints outside of chroot jail will give SEQ_SKIP on this */
+       err = seq_path_root(m, &mnt_path, &p->root, " \t\n\\");
+       if (err)
+               goto out;
        seq_putc(m, ' ');
 
        /* file system type */
@@ -216,6 +223,7 @@ static int show_vfsstat(struct seq_file *m, struct vfsmount *mnt)
        }
 
        seq_putc(m, '\n');
+out:
        return err;
 }
 
index a012c51caffd2a195b6015b9594d0f1862dba324..05e90edd199214fd0507b5e5b79a3b60ea8a49d3 100644 (file)
@@ -57,6 +57,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
        sector_t offset;
        int i, num, ret = 0;
        struct extent_position epos = { NULL, 0, {0, 0} };
+       struct super_block *sb = dir->i_sb;
 
        if (ctx->pos == 0) {
                if (!dir_emit_dot(file, ctx))
@@ -76,16 +77,16 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
        if (nf_pos == 0)
                nf_pos = udf_ext0_offset(dir);
 
-       fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1);
+       fibh.soffset = fibh.eoffset = nf_pos & (sb->s_blocksize - 1);
        if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
-               if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
+               if (inode_bmap(dir, nf_pos >> sb->s_blocksize_bits,
                    &epos, &eloc, &elen, &offset)
                    != (EXT_RECORDED_ALLOCATED >> 30)) {
                        ret = -ENOENT;
                        goto out;
                }
-               block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
-               if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
+               block = udf_get_lb_pblock(sb, &eloc, offset);
+               if ((++offset << sb->s_blocksize_bits) < elen) {
                        if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                                epos.offset -= sizeof(struct short_ad);
                        else if (iinfo->i_alloc_type ==
@@ -95,18 +96,18 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                        offset = 0;
                }
 
-               if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
+               if (!(fibh.sbh = fibh.ebh = udf_tread(sb, block))) {
                        ret = -EIO;
                        goto out;
                }
 
-               if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) {
-                       i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
-                       if (i + offset > (elen >> dir->i_sb->s_blocksize_bits))
-                               i = (elen >> dir->i_sb->s_blocksize_bits) - offset;
+               if (!(offset & ((16 >> (sb->s_blocksize_bits - 9)) - 1))) {
+                       i = 16 >> (sb->s_blocksize_bits - 9);
+                       if (i + offset > (elen >> sb->s_blocksize_bits))
+                               i = (elen >> sb->s_blocksize_bits) - offset;
                        for (num = 0; i > 0; i--) {
-                               block = udf_get_lb_pblock(dir->i_sb, &eloc, offset + i);
-                               tmp = udf_tgetblk(dir->i_sb, block);
+                               block = udf_get_lb_pblock(sb, &eloc, offset + i);
+                               tmp = udf_tgetblk(sb, block);
                                if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
                                        bha[num++] = tmp;
                                else
@@ -152,12 +153,12 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                }
 
                if ((cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
-                       if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
+                       if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
                                continue;
                }
 
                if ((cfi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
-                       if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
+                       if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
                                continue;
                }
 
@@ -167,12 +168,12 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
                        continue;
                }
 
-               flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
+               flen = udf_get_filename(sb, nameptr, lfi, fname, UDF_NAME_LEN);
                if (!flen)
                        continue;
 
                tloc = lelb_to_cpu(cfi.icb.extLocation);
-               iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0);
+               iblock = udf_get_lb_pblock(sb, &tloc, 0);
                if (!dir_emit(ctx, fname, flen, iblock, DT_UNKNOWN))
                        goto out;
        } /* end while */
index c9b4df5810d52560b084b9557150faf8cfbe6e29..5bc71d9a674a7e5dfc3ff882ee61a2591a328c7b 100644 (file)
@@ -1489,6 +1489,20 @@ reread:
        }
        inode->i_generation = iinfo->i_unique;
 
+       /* Sanity checks for files in ICB so that we don't get confused later */
+       if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+               /*
+                * For file in ICB data is stored in allocation descriptor
+                * so sizes should match
+                */
+               if (iinfo->i_lenAlloc != inode->i_size)
+                       goto out;
+               /* File in ICB has to fit in there... */
+               if (inode->i_size > inode->i_sb->s_blocksize -
+                                       udf_file_entry_alloc_offset(inode))
+                       goto out;
+       }
+
        switch (fe->icbTag.fileType) {
        case ICBTAG_FILE_TYPE_DIRECTORY:
                inode->i_op = &udf_dir_inode_operations;
index c12e260fd6c417eb9c690782b8860f0e5eeff8d9..33b246b82c98510289d533fcdcbc59154d38589f 100644 (file)
@@ -159,18 +159,19 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
        struct udf_inode_info *dinfo = UDF_I(dir);
        int isdotdot = child->len == 2 &&
                child->name[0] == '.' && child->name[1] == '.';
+       struct super_block *sb = dir->i_sb;
 
        size = udf_ext0_offset(dir) + dir->i_size;
        f_pos = udf_ext0_offset(dir);
 
        fibh->sbh = fibh->ebh = NULL;
-       fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
+       fibh->soffset = fibh->eoffset = f_pos & (sb->s_blocksize - 1);
        if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
-               if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
+               if (inode_bmap(dir, f_pos >> sb->s_blocksize_bits, &epos,
                    &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30))
                        goto out_err;
-               block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
-               if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
+               block = udf_get_lb_pblock(sb, &eloc, offset);
+               if ((++offset << sb->s_blocksize_bits) < elen) {
                        if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
                                epos.offset -= sizeof(struct short_ad);
                        else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
@@ -178,7 +179,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
                } else
                        offset = 0;
 
-               fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
+               fibh->sbh = fibh->ebh = udf_tread(sb, block);
                if (!fibh->sbh)
                        goto out_err;
        }
@@ -217,12 +218,12 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
                }
 
                if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
-                       if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
+                       if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
                                continue;
                }
 
                if ((cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
-                       if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
+                       if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
                                continue;
                }
 
@@ -233,7 +234,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
                if (!lfi)
                        continue;
 
-               flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
+               flen = udf_get_filename(sb, nameptr, lfi, fname, UDF_NAME_LEN);
                if (flen && udf_match(flen, fname, child->len, child->name))
                        goto out_ok;
        }
index 6fb7945c1e6e8813afce2ad81aa41ea28a6f4565..ac10ca939f267283ba0f64da9d4ec82de0eeeb61 100644 (file)
 #include <linux/buffer_head.h>
 #include "udf_i.h"
 
-static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
-                          int fromlen, unsigned char *to)
+static int udf_pc_to_char(struct super_block *sb, unsigned char *from,
+                         int fromlen, unsigned char *to, int tolen)
 {
        struct pathComponent *pc;
        int elen = 0;
+       int comp_len;
        unsigned char *p = to;
 
+       /* Reserve one byte for terminating \0 */
+       tolen--;
        while (elen < fromlen) {
                pc = (struct pathComponent *)(from + elen);
+               elen += sizeof(struct pathComponent);
                switch (pc->componentType) {
                case 1:
                        /*
                         * Symlink points to some place which should be agreed
                         * upon between originator and receiver of the media. Ignore.
                         */
-                       if (pc->lengthComponentIdent > 0)
+                       if (pc->lengthComponentIdent > 0) {
+                               elen += pc->lengthComponentIdent;
                                break;
+                       }
                        /* Fall through */
                case 2:
+                       if (tolen == 0)
+                               return -ENAMETOOLONG;
                        p = to;
                        *p++ = '/';
+                       tolen--;
                        break;
                case 3:
+                       if (tolen < 3)
+                               return -ENAMETOOLONG;
                        memcpy(p, "../", 3);
                        p += 3;
+                       tolen -= 3;
                        break;
                case 4:
+                       if (tolen < 2)
+                               return -ENAMETOOLONG;
                        memcpy(p, "./", 2);
                        p += 2;
+                       tolen -= 2;
                        /* that would be . - just ignore */
                        break;
                case 5:
-                       p += udf_get_filename(sb, pc->componentIdent, p,
-                                             pc->lengthComponentIdent);
+                       elen += pc->lengthComponentIdent;
+                       if (elen > fromlen)
+                               return -EIO;
+                       comp_len = udf_get_filename(sb, pc->componentIdent,
+                                                   pc->lengthComponentIdent,
+                                                   p, tolen);
+                       p += comp_len;
+                       tolen -= comp_len;
+                       if (tolen == 0)
+                               return -ENAMETOOLONG;
                        *p++ = '/';
+                       tolen--;
                        break;
                }
-               elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
        }
        if (p > to + 1)
                p[-1] = '\0';
        else
                p[0] = '\0';
+       return 0;
 }
 
 static int udf_symlink_filler(struct file *file, struct page *page)
@@ -80,11 +104,17 @@ static int udf_symlink_filler(struct file *file, struct page *page)
        struct inode *inode = page->mapping->host;
        struct buffer_head *bh = NULL;
        unsigned char *symlink;
-       int err = -EIO;
+       int err;
        unsigned char *p = kmap(page);
        struct udf_inode_info *iinfo;
        uint32_t pos;
 
+       /* We don't support symlinks longer than one block */
+       if (inode->i_size > inode->i_sb->s_blocksize) {
+               err = -ENAMETOOLONG;
+               goto out_unmap;
+       }
+
        iinfo = UDF_I(inode);
        pos = udf_block_map(inode, 0);
 
@@ -94,14 +124,18 @@ static int udf_symlink_filler(struct file *file, struct page *page)
        } else {
                bh = sb_bread(inode->i_sb, pos);
 
-               if (!bh)
-                       goto out;
+               if (!bh) {
+                       err = -EIO;
+                       goto out_unlock_inode;
+               }
 
                symlink = bh->b_data;
        }
 
-       udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
+       err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE);
        brelse(bh);
+       if (err)
+               goto out_unlock_inode;
 
        up_read(&iinfo->i_data_sem);
        SetPageUptodate(page);
@@ -109,9 +143,10 @@ static int udf_symlink_filler(struct file *file, struct page *page)
        unlock_page(page);
        return 0;
 
-out:
+out_unlock_inode:
        up_read(&iinfo->i_data_sem);
        SetPageError(page);
+out_unmap:
        kunmap(page);
        unlock_page(page);
        return err;
index 1cc3c993ebd04f4adb7b425f500e40d1185aae9f..47bb3f5ca360d4f1be8f92036685278868ab99e3 100644 (file)
@@ -211,7 +211,8 @@ udf_get_lb_pblock(struct super_block *sb, struct kernel_lb_addr *loc,
 }
 
 /* unicode.c */
-extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int);
+extern int udf_get_filename(struct super_block *, uint8_t *, int, uint8_t *,
+                           int);
 extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
                            int);
 extern int udf_build_ustr(struct ustr *, dstring *, int);
index afd470e588ffbbd24ec886b3e8a619833a5e3e9a..b84fee372734bd494ba5eb86f5ce5c8c28b99b5a 100644 (file)
@@ -28,7 +28,8 @@
 
 #include "udf_sb.h"
 
-static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int);
+static int udf_translate_to_linux(uint8_t *, int, uint8_t *, int, uint8_t *,
+                                 int);
 
 static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
 {
@@ -333,8 +334,8 @@ try_again:
        return u_len + 1;
 }
 
-int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
-                    int flen)
+int udf_get_filename(struct super_block *sb, uint8_t *sname, int slen,
+                    uint8_t *dname, int dlen)
 {
        struct ustr *filename, *unifilename;
        int len = 0;
@@ -347,7 +348,7 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
        if (!unifilename)
                goto out1;
 
-       if (udf_build_ustr_exact(unifilename, sname, flen))
+       if (udf_build_ustr_exact(unifilename, sname, slen))
                goto out2;
 
        if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
@@ -366,7 +367,8 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
        } else
                goto out2;
 
-       len = udf_translate_to_linux(dname, filename->u_name, filename->u_len,
+       len = udf_translate_to_linux(dname, dlen,
+                                    filename->u_name, filename->u_len,
                                     unifilename->u_name, unifilename->u_len);
 out2:
        kfree(unifilename);
@@ -403,10 +405,12 @@ int udf_put_filename(struct super_block *sb, const uint8_t *sname,
 #define EXT_MARK               '.'
 #define CRC_MARK               '#'
 #define EXT_SIZE               5
+/* Number of chars we need to store generated CRC to make filename unique */
+#define CRC_LEN                        5
 
-static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
-                                 int udfLen, uint8_t *fidName,
-                                 int fidNameLen)
+static int udf_translate_to_linux(uint8_t *newName, int newLen,
+                                 uint8_t *udfName, int udfLen,
+                                 uint8_t *fidName, int fidNameLen)
 {
        int index, newIndex = 0, needsCRC = 0;
        int extIndex = 0, newExtIndex = 0, hasExt = 0;
@@ -439,7 +443,7 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
                                        newExtIndex = newIndex;
                                }
                        }
-                       if (newIndex < 256)
+                       if (newIndex < newLen)
                                newName[newIndex++] = curr;
                        else
                                needsCRC = 1;
@@ -467,13 +471,13 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
                                }
                                ext[localExtIndex++] = curr;
                        }
-                       maxFilenameLen = 250 - localExtIndex;
+                       maxFilenameLen = newLen - CRC_LEN - localExtIndex;
                        if (newIndex > maxFilenameLen)
                                newIndex = maxFilenameLen;
                        else
                                newIndex = newExtIndex;
-               } else if (newIndex > 250)
-                       newIndex = 250;
+               } else if (newIndex > newLen - CRC_LEN)
+                       newIndex = newLen - CRC_LEN;
                newName[newIndex++] = CRC_MARK;
                valueCRC = crc_itu_t(0, fidName, fidNameLen);
                newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
index 8ba35c622e2202a889a3a06706bdc93e035fb841..e1b2e8b98af7cde2c7276e915cd90ea4794b317e 100644 (file)
@@ -901,11 +901,15 @@ extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
 extern int drm_wait_vblank(struct drm_device *dev, void *data,
                           struct drm_file *filp);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
                                     struct timeval *vblanktime);
 extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
                                     struct drm_pending_vblank_event *e);
+extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+                                      struct drm_pending_vblank_event *e);
 extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
 extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
index 780511a459c01e3012efbfe2912ee27dc5996971..1e6ae1458f7ab98ef42cd66ef9834cef8f276718 100644 (file)
@@ -119,13 +119,6 @@ struct drm_gem_object {
         * simply leave it as NULL.
         */
        struct dma_buf_attachment *import_attach;
-
-       /**
-        * dumb - created as dumb buffer
-        * Whether the gem object was created using the dumb buffer interface
-        * as such it may not be used for GPU rendering.
-        */
-       bool dumb;
 };
 
 void drm_gem_object_release(struct drm_gem_object *obj);
diff --git a/include/dt-bindings/clock/exynos4415.h b/include/dt-bindings/clock/exynos4415.h
new file mode 100644 (file)
index 0000000..7eed551
--- /dev/null
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Chanwoo Choi <cw00.choi@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Device Tree binding constants for Samsung Exynos4415 clock controllers.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H
+#define _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H
+
+/*
+ * Let each exported clock get a unique index, which is used on DT-enabled
+ * platforms to lookup the clock from a clock specifier. These indices are
+ * therefore considered an ABI and so must not be changed. This implies
+ * that new clocks should be added either in free spaces between clock groups
+ * or at the end.
+ */
+
+/*
+ * Main CMU
+ */
+
+#define CLK_OSCSEL                     1
+#define CLK_FIN_PLL                    2
+#define CLK_FOUT_APLL                  3
+#define CLK_FOUT_MPLL                  4
+#define CLK_FOUT_EPLL                  5
+#define CLK_FOUT_G3D_PLL               6
+#define CLK_FOUT_ISP_PLL               7
+#define CLK_FOUT_DISP_PLL              8
+
+/* Muxes */
+#define CLK_MOUT_MPLL_USER_L           16
+#define CLK_MOUT_GDL                   17
+#define CLK_MOUT_MPLL_USER_R           18
+#define CLK_MOUT_GDR                   19
+#define CLK_MOUT_EBI                   20
+#define CLK_MOUT_ACLK_200              21
+#define CLK_MOUT_ACLK_160              22
+#define CLK_MOUT_ACLK_100              23
+#define CLK_MOUT_ACLK_266              24
+#define CLK_MOUT_G3D_PLL               25
+#define CLK_MOUT_EPLL                  26
+#define CLK_MOUT_EBI_1                 27
+#define CLK_MOUT_ISP_PLL               28
+#define CLK_MOUT_DISP_PLL              29
+#define CLK_MOUT_MPLL_USER_T           30
+#define CLK_MOUT_ACLK_400_MCUISP       31
+#define CLK_MOUT_G3D_PLLSRC            32
+#define CLK_MOUT_CSIS1                 33
+#define CLK_MOUT_CSIS0                 34
+#define CLK_MOUT_CAM1                  35
+#define CLK_MOUT_FIMC3_LCLK            36
+#define CLK_MOUT_FIMC2_LCLK            37
+#define CLK_MOUT_FIMC1_LCLK            38
+#define CLK_MOUT_FIMC0_LCLK            39
+#define CLK_MOUT_MFC                   40
+#define CLK_MOUT_MFC_1                 41
+#define CLK_MOUT_MFC_0                 42
+#define CLK_MOUT_G3D                   43
+#define CLK_MOUT_G3D_1                 44
+#define CLK_MOUT_G3D_0                 45
+#define CLK_MOUT_MIPI0                 46
+#define CLK_MOUT_FIMD0                 47
+#define CLK_MOUT_TSADC_ISP             48
+#define CLK_MOUT_UART_ISP              49
+#define CLK_MOUT_SPI1_ISP              50
+#define CLK_MOUT_SPI0_ISP              51
+#define CLK_MOUT_PWM_ISP               52
+#define CLK_MOUT_AUDIO0                        53
+#define CLK_MOUT_TSADC                 54
+#define CLK_MOUT_MMC2                  55
+#define CLK_MOUT_MMC1                  56
+#define CLK_MOUT_MMC0                  57
+#define CLK_MOUT_UART3                 58
+#define CLK_MOUT_UART2                 59
+#define CLK_MOUT_UART1                 60
+#define CLK_MOUT_UART0                 61
+#define CLK_MOUT_SPI2                  62
+#define CLK_MOUT_SPI1                  63
+#define CLK_MOUT_SPI0                  64
+#define CLK_MOUT_SPDIF                 65
+#define CLK_MOUT_AUDIO2                        66
+#define CLK_MOUT_AUDIO1                        67
+#define CLK_MOUT_MPLL_USER_C           68
+#define CLK_MOUT_HPM                   69
+#define CLK_MOUT_CORE                  70
+#define CLK_MOUT_APLL                  71
+#define CLK_MOUT_PXLASYNC_CSIS1_FIMC   72
+#define CLK_MOUT_PXLASYNC_CSIS0_FIMC   73
+#define CLK_MOUT_JPEG                  74
+#define CLK_MOUT_JPEG1                 75
+#define CLK_MOUT_JPEG0                 76
+#define CLK_MOUT_ACLK_ISP0_300         77
+#define CLK_MOUT_ACLK_ISP0_400         78
+#define CLK_MOUT_ACLK_ISP0_300_USER    79
+#define CLK_MOUT_ACLK_ISP1_300         80
+#define CLK_MOUT_ACLK_ISP1_300_USER    81
+#define CLK_MOUT_HDMI                  82
+
+/* Dividers */
+#define CLK_DIV_GPL                    90
+#define CLK_DIV_GDL                    91
+#define CLK_DIV_GPR                    92
+#define CLK_DIV_GDR                    93
+#define CLK_DIV_ACLK_400_MCUISP                94
+#define CLK_DIV_EBI                    95
+#define CLK_DIV_ACLK_200               96
+#define CLK_DIV_ACLK_160               97
+#define CLK_DIV_ACLK_100               98
+#define CLK_DIV_ACLK_266               99
+#define CLK_DIV_CSIS1                  100
+#define CLK_DIV_CSIS0                  101
+#define CLK_DIV_CAM1                   102
+#define CLK_DIV_FIMC3_LCLK             103
+#define CLK_DIV_FIMC2_LCLK             104
+#define CLK_DIV_FIMC1_LCLK             105
+#define CLK_DIV_FIMC0_LCLK             106
+#define CLK_DIV_TV_BLK                 107
+#define CLK_DIV_MFC                    108
+#define CLK_DIV_G3D                    109
+#define CLK_DIV_MIPI0_PRE              110
+#define CLK_DIV_MIPI0                  111
+#define CLK_DIV_FIMD0                  112
+#define CLK_DIV_UART_ISP               113
+#define CLK_DIV_SPI1_ISP_PRE           114
+#define CLK_DIV_SPI1_ISP               115
+#define CLK_DIV_SPI0_ISP_PRE           116
+#define CLK_DIV_SPI0_ISP               117
+#define CLK_DIV_PWM_ISP                        118
+#define CLK_DIV_PCM0                   119
+#define CLK_DIV_AUDIO0                 120
+#define CLK_DIV_TSADC_PRE              121
+#define CLK_DIV_TSADC                  122
+#define CLK_DIV_MMC1_PRE               123
+#define CLK_DIV_MMC1                   124
+#define CLK_DIV_MMC0_PRE               125
+#define CLK_DIV_MMC0                   126
+#define CLK_DIV_MMC2_PRE               127
+#define CLK_DIV_MMC2                   128
+#define CLK_DIV_UART3                  129
+#define CLK_DIV_UART2                  130
+#define CLK_DIV_UART1                  131
+#define CLK_DIV_UART0                  132
+#define CLK_DIV_SPI1_PRE               133
+#define CLK_DIV_SPI1                   134
+#define CLK_DIV_SPI0_PRE               135
+#define CLK_DIV_SPI0                   136
+#define CLK_DIV_SPI2_PRE               137
+#define CLK_DIV_SPI2                   138
+#define CLK_DIV_PCM2                   139
+#define CLK_DIV_AUDIO2                 140
+#define CLK_DIV_PCM1                   141
+#define CLK_DIV_AUDIO1                 142
+#define CLK_DIV_I2S1                   143
+#define CLK_DIV_PXLASYNC_CSIS1_FIMC    144
+#define CLK_DIV_PXLASYNC_CSIS0_FIMC    145
+#define CLK_DIV_JPEG                   146
+#define CLK_DIV_CORE2                  147
+#define CLK_DIV_APLL                   148
+#define CLK_DIV_PCLK_DBG               149
+#define CLK_DIV_ATB                    150
+#define CLK_DIV_PERIPH                 151
+#define CLK_DIV_COREM1                 152
+#define CLK_DIV_COREM0                 153
+#define CLK_DIV_CORE                   154
+#define CLK_DIV_HPM                    155
+#define CLK_DIV_COPY                   156
+
+/* Gates */
+#define CLK_ASYNC_G3D                  180
+#define CLK_ASYNC_MFCL                 181
+#define CLK_ASYNC_TVX                  182
+#define CLK_PPMULEFT                   183
+#define CLK_GPIO_LEFT                  184
+#define CLK_PPMUIMAGE                  185
+#define CLK_QEMDMA2                    186
+#define CLK_QEROTATOR                  187
+#define CLK_SMMUMDMA2                  188
+#define CLK_SMMUROTATOR                        189
+#define CLK_MDMA2                      190
+#define CLK_ROTATOR                    191
+#define CLK_ASYNC_ISPMX                        192
+#define CLK_ASYNC_MAUDIOX              193
+#define CLK_ASYNC_MFCR                 194
+#define CLK_ASYNC_FSYSD                        195
+#define CLK_ASYNC_LCD0X                        196
+#define CLK_ASYNC_CAMX                 197
+#define CLK_PPMURIGHT                  198
+#define CLK_GPIO_RIGHT                 199
+#define CLK_ANTIRBK_APBIF              200
+#define CLK_EFUSE_WRITER_APBIF         201
+#define CLK_MONOCNT                    202
+#define CLK_TZPC6                      203
+#define CLK_PROVISIONKEY1              204
+#define CLK_PROVISIONKEY0              205
+#define CLK_CMU_ISPPART                        206
+#define CLK_TMU_APBIF                  207
+#define CLK_KEYIF                      208
+#define CLK_RTC                                209
+#define CLK_WDT                                210
+#define CLK_MCT                                211
+#define CLK_SECKEY                     212
+#define CLK_HDMI_CEC                   213
+#define CLK_TZPC5                      214
+#define CLK_TZPC4                      215
+#define CLK_TZPC3                      216
+#define CLK_TZPC2                      217
+#define CLK_TZPC1                      218
+#define CLK_TZPC0                      219
+#define CLK_CMU_COREPART               220
+#define CLK_CMU_TOPPART                        221
+#define CLK_PMU_APBIF                  222
+#define CLK_SYSREG                     223
+#define CLK_CHIP_ID                    224
+#define CLK_SMMUFIMC_LITE2             225
+#define CLK_FIMC_LITE2                 226
+#define CLK_PIXELASYNCM1               227
+#define CLK_PIXELASYNCM0               228
+#define CLK_PPMUCAMIF                  229
+#define CLK_SMMUJPEG                   230
+#define CLK_SMMUFIMC3                  231
+#define CLK_SMMUFIMC2                  232
+#define CLK_SMMUFIMC1                  233
+#define CLK_SMMUFIMC0                  234
+#define CLK_JPEG                       235
+#define CLK_CSIS1                      236
+#define CLK_CSIS0                      237
+#define CLK_FIMC3                      238
+#define CLK_FIMC2                      239
+#define CLK_FIMC1                      240
+#define CLK_FIMC0                      241
+#define CLK_PPMUTV                     242
+#define CLK_SMMUTV                     243
+#define CLK_HDMI                       244
+#define CLK_MIXER                      245
+#define CLK_VP                         246
+#define CLK_PPMUMFC_R                  247
+#define CLK_PPMUMFC_L                  248
+#define CLK_SMMUMFC_R                  249
+#define CLK_SMMUMFC_L                  250
+#define CLK_MFC                                251
+#define CLK_PPMUG3D                    252
+#define CLK_G3D                                253
+#define CLK_PPMULCD0                   254
+#define CLK_SMMUFIMD0                  255
+#define CLK_DSIM0                      256
+#define CLK_SMIES                      257
+#define CLK_MIE0                       258
+#define CLK_FIMD0                      259
+#define CLK_TSADC                      260
+#define CLK_PPMUFILE                   261
+#define CLK_NFCON                      262
+#define CLK_USBDEVICE                  263
+#define CLK_USBHOST                    264
+#define CLK_SROMC                      265
+#define CLK_SDMMC2                     266
+#define CLK_SDMMC1                     267
+#define CLK_SDMMC0                     268
+#define CLK_PDMA1                      269
+#define CLK_PDMA0                      270
+#define CLK_SPDIF                      271
+#define CLK_PWM                                272
+#define CLK_PCM2                       273
+#define CLK_PCM1                       274
+#define CLK_I2S1                       275
+#define CLK_SPI2                       276
+#define CLK_SPI1                       277
+#define CLK_SPI0                       278
+#define CLK_I2CHDMI                    279
+#define CLK_I2C7                       280
+#define CLK_I2C6                       281
+#define CLK_I2C5                       282
+#define CLK_I2C4                       283
+#define CLK_I2C3                       284
+#define CLK_I2C2                       285
+#define CLK_I2C1                       286
+#define CLK_I2C0                       287
+#define CLK_UART3                      288
+#define CLK_UART2                      289
+#define CLK_UART1                      290
+#define CLK_UART0                      291
+
+/* Special clocks */
+#define CLK_SCLK_PXLAYSNC_CSIS1_FIMC   330
+#define CLK_SCLK_PXLAYSNC_CSIS0_FIMC   331
+#define CLK_SCLK_JPEG                  332
+#define CLK_SCLK_CSIS1                 333
+#define CLK_SCLK_CSIS0                 334
+#define CLK_SCLK_CAM1                  335
+#define CLK_SCLK_FIMC3_LCLK            336
+#define CLK_SCLK_FIMC2_LCLK            337
+#define CLK_SCLK_FIMC1_LCLK            338
+#define CLK_SCLK_FIMC0_LCLK            339
+#define CLK_SCLK_PIXEL                 340
+#define CLK_SCLK_HDMI                  341
+#define CLK_SCLK_MIXER                 342
+#define CLK_SCLK_MFC                   343
+#define CLK_SCLK_G3D                   344
+#define CLK_SCLK_MIPIDPHY4L            345
+#define CLK_SCLK_MIPI0                 346
+#define CLK_SCLK_MDNIE0                        347
+#define CLK_SCLK_FIMD0                 348
+#define CLK_SCLK_PCM0                  349
+#define CLK_SCLK_AUDIO0                        350
+#define CLK_SCLK_TSADC                 351
+#define CLK_SCLK_EBI                   352
+#define CLK_SCLK_MMC2                  353
+#define CLK_SCLK_MMC1                  354
+#define CLK_SCLK_MMC0                  355
+#define CLK_SCLK_I2S                   356
+#define CLK_SCLK_PCM2                  357
+#define CLK_SCLK_PCM1                  358
+#define CLK_SCLK_AUDIO2                        359
+#define CLK_SCLK_AUDIO1                        360
+#define CLK_SCLK_SPDIF                 361
+#define CLK_SCLK_SPI2                  362
+#define CLK_SCLK_SPI1                  363
+#define CLK_SCLK_SPI0                  364
+#define CLK_SCLK_UART3                 365
+#define CLK_SCLK_UART2                 366
+#define CLK_SCLK_UART1                 367
+#define CLK_SCLK_UART0                 368
+#define CLK_SCLK_HDMIPHY               369
+
+/*
+ * Total number of clocks of main CMU.
+ * NOTE: Must be equal to last clock ID increased by one.
+ */
+#define CLK_NR_CLKS                    370
+
+/*
+ * CMU DMC
+ */
+#define CLK_DMC_FOUT_MPLL              1
+#define CLK_DMC_FOUT_BPLL              2
+
+#define CLK_DMC_MOUT_MPLL              3
+#define CLK_DMC_MOUT_BPLL              4
+#define CLK_DMC_MOUT_DPHY              5
+#define CLK_DMC_MOUT_DMC_BUS           6
+
+#define CLK_DMC_DIV_DMC                        7
+#define CLK_DMC_DIV_DPHY               8
+#define CLK_DMC_DIV_DMC_PRE            9
+#define CLK_DMC_DIV_DMCP               10
+#define CLK_DMC_DIV_DMCD               11
+#define CLK_DMC_DIV_MPLL_PRE           12
+
+/*
+ * Total number of clocks of CMU_DMC.
+ * NOTE: Must be equal to highest clock ID increased by one.
+ */
+#define NR_CLKS_DMC                    13
+
+#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS4415_CLOCK_H */
diff --git a/include/dt-bindings/clock/exynos7-clk.h b/include/dt-bindings/clock/exynos7-clk.h
new file mode 100644 (file)
index 0000000..8e4681b
--- /dev/null
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Naveen Krishna Ch <naveenkrishna.ch@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#ifndef _DT_BINDINGS_CLOCK_EXYNOS7_H
+#define _DT_BINDINGS_CLOCK_EXYNOS7_H
+
+/* TOPC */
+#define DOUT_ACLK_PERIS                        1
+#define DOUT_SCLK_BUS0_PLL             2
+#define DOUT_SCLK_BUS1_PLL             3
+#define DOUT_SCLK_CC_PLL               4
+#define DOUT_SCLK_MFC_PLL              5
+#define DOUT_ACLK_CCORE_133            6
+#define TOPC_NR_CLK                    7
+
+/* TOP0 */
+#define DOUT_ACLK_PERIC1               1
+#define DOUT_ACLK_PERIC0               2
+#define CLK_SCLK_UART0                 3
+#define CLK_SCLK_UART1                 4
+#define CLK_SCLK_UART2                 5
+#define CLK_SCLK_UART3                 6
+#define TOP0_NR_CLK                    7
+
+/* TOP1 */
+#define DOUT_ACLK_FSYS1_200            1
+#define DOUT_ACLK_FSYS0_200            2
+#define DOUT_SCLK_MMC2                 3
+#define DOUT_SCLK_MMC1                 4
+#define DOUT_SCLK_MMC0                 5
+#define CLK_SCLK_MMC2                  6
+#define CLK_SCLK_MMC1                  7
+#define CLK_SCLK_MMC0                  8
+#define TOP1_NR_CLK                    9
+
+/* CCORE */
+#define PCLK_RTC                       1
+#define CCORE_NR_CLK                   2
+
+/* PERIC0 */
+#define PCLK_UART0                     1
+#define SCLK_UART0                     2
+#define PCLK_HSI2C0                    3
+#define PCLK_HSI2C1                    4
+#define PCLK_HSI2C4                    5
+#define PCLK_HSI2C5                    6
+#define PCLK_HSI2C9                    7
+#define PCLK_HSI2C10                   8
+#define PCLK_HSI2C11                   9
+#define PCLK_PWM                       10
+#define SCLK_PWM                       11
+#define PCLK_ADCIF                     12
+#define PERIC0_NR_CLK                  13
+
+/* PERIC1 */
+#define PCLK_UART1                     1
+#define PCLK_UART2                     2
+#define PCLK_UART3                     3
+#define SCLK_UART1                     4
+#define SCLK_UART2                     5
+#define SCLK_UART3                     6
+#define PCLK_HSI2C2                    7
+#define PCLK_HSI2C3                    8
+#define PCLK_HSI2C6                    9
+#define PCLK_HSI2C7                    10
+#define PCLK_HSI2C8                    11
+#define PERIC1_NR_CLK                  12
+
+/* PERIS */
+#define PCLK_CHIPID                    1
+#define SCLK_CHIPID                    2
+#define PCLK_WDT                       3
+#define PCLK_TMU                       4
+#define SCLK_TMU                       5
+#define PERIS_NR_CLK                   6
+
+/* FSYS0 */
+#define ACLK_MMC2                      1
+#define FSYS0_NR_CLK                   2
+
+/* FSYS1 */
+#define ACLK_MMC1                      1
+#define ACLK_MMC0                      2
+#define FSYS1_NR_CLK                   3
+
+#endif /* _DT_BINDINGS_CLOCK_EXYNOS7_H */
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
new file mode 100644 (file)
index 0000000..591f7fb
--- /dev/null
@@ -0,0 +1,74 @@
+#ifndef __DTS_MARVELL_MMP2_CLOCK_H
+#define __DTS_MARVELL_MMP2_CLOCK_H
+
+/* fixed clocks and plls */
+#define MMP2_CLK_CLK32                 1
+#define MMP2_CLK_VCTCXO                        2
+#define MMP2_CLK_PLL1                  3
+#define MMP2_CLK_PLL1_2                        8
+#define MMP2_CLK_PLL1_4                        9
+#define MMP2_CLK_PLL1_8                        10
+#define MMP2_CLK_PLL1_16               11
+#define MMP2_CLK_PLL1_3                        12
+#define MMP2_CLK_PLL1_6                        13
+#define MMP2_CLK_PLL1_12               14
+#define MMP2_CLK_PLL1_20               15
+#define MMP2_CLK_PLL2                  16
+#define MMP2_CLK_PLL2_2                        17
+#define MMP2_CLK_PLL2_4                        18
+#define MMP2_CLK_PLL2_8                        19
+#define MMP2_CLK_PLL2_16               20
+#define MMP2_CLK_PLL2_3                        21
+#define MMP2_CLK_PLL2_6                        22
+#define MMP2_CLK_PLL2_12               23
+#define MMP2_CLK_VCTCXO_2              24
+#define MMP2_CLK_VCTCXO_4              25
+#define MMP2_CLK_UART_PLL              26
+#define MMP2_CLK_USB_PLL               27
+
+/* apb periphrals */
+#define MMP2_CLK_TWSI0                 60
+#define MMP2_CLK_TWSI1                 61
+#define MMP2_CLK_TWSI2                 62
+#define MMP2_CLK_TWSI3                 63
+#define MMP2_CLK_TWSI4                 64
+#define MMP2_CLK_TWSI5                 65
+#define MMP2_CLK_GPIO                  66
+#define MMP2_CLK_KPC                   67
+#define MMP2_CLK_RTC                   68
+#define MMP2_CLK_PWM0                  69
+#define MMP2_CLK_PWM1                  70
+#define MMP2_CLK_PWM2                  71
+#define MMP2_CLK_PWM3                  72
+#define MMP2_CLK_UART0                 73
+#define MMP2_CLK_UART1                 74
+#define MMP2_CLK_UART2                 75
+#define MMP2_CLK_UART3                 76
+#define MMP2_CLK_SSP0                  77
+#define MMP2_CLK_SSP1                  78
+#define MMP2_CLK_SSP2                  79
+#define MMP2_CLK_SSP3                  80
+
+/* axi periphrals */
+#define MMP2_CLK_SDH0                  101
+#define MMP2_CLK_SDH1                  102
+#define MMP2_CLK_SDH2                  103
+#define MMP2_CLK_SDH3                  104
+#define MMP2_CLK_USB                   105
+#define MMP2_CLK_DISP0                 106
+#define MMP2_CLK_DISP0_MUX             107
+#define MMP2_CLK_DISP0_SPHY            108
+#define MMP2_CLK_DISP1                 109
+#define MMP2_CLK_DISP1_MUX             110
+#define MMP2_CLK_CCIC_ARBITER          111
+#define MMP2_CLK_CCIC0                 112
+#define MMP2_CLK_CCIC0_MIX             113
+#define MMP2_CLK_CCIC0_PHY             114
+#define MMP2_CLK_CCIC0_SPHY            115
+#define MMP2_CLK_CCIC1                 116
+#define MMP2_CLK_CCIC1_MIX             117
+#define MMP2_CLK_CCIC1_PHY             118
+#define MMP2_CLK_CCIC1_SPHY            119
+
+#define MMP2_NR_CLKS                   200
+#endif
diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h
new file mode 100644 (file)
index 0000000..79630b9
--- /dev/null
@@ -0,0 +1,57 @@
+#ifndef __DTS_MARVELL_PXA168_CLOCK_H
+#define __DTS_MARVELL_PXA168_CLOCK_H
+
+/* fixed clocks and plls */
+#define PXA168_CLK_CLK32               1
+#define PXA168_CLK_VCTCXO              2
+#define PXA168_CLK_PLL1                        3
+#define PXA168_CLK_PLL1_2              8
+#define PXA168_CLK_PLL1_4              9
+#define PXA168_CLK_PLL1_8              10
+#define PXA168_CLK_PLL1_16             11
+#define PXA168_CLK_PLL1_6              12
+#define PXA168_CLK_PLL1_12             13
+#define PXA168_CLK_PLL1_24             14
+#define PXA168_CLK_PLL1_48             15
+#define PXA168_CLK_PLL1_96             16
+#define PXA168_CLK_PLL1_13             17
+#define PXA168_CLK_PLL1_13_1_5         18
+#define PXA168_CLK_PLL1_2_1_5          19
+#define PXA168_CLK_PLL1_3_16           20
+#define PXA168_CLK_UART_PLL            27
+
+/* apb periphrals */
+#define PXA168_CLK_TWSI0               60
+#define PXA168_CLK_TWSI1               61
+#define PXA168_CLK_TWSI2               62
+#define PXA168_CLK_TWSI3               63
+#define PXA168_CLK_GPIO                        64
+#define PXA168_CLK_KPC                 65
+#define PXA168_CLK_RTC                 66
+#define PXA168_CLK_PWM0                        67
+#define PXA168_CLK_PWM1                        68
+#define PXA168_CLK_PWM2                        69
+#define PXA168_CLK_PWM3                        70
+#define PXA168_CLK_UART0               71
+#define PXA168_CLK_UART1               72
+#define PXA168_CLK_UART2               73
+#define PXA168_CLK_SSP0                        74
+#define PXA168_CLK_SSP1                        75
+#define PXA168_CLK_SSP2                        76
+#define PXA168_CLK_SSP3                        77
+#define PXA168_CLK_SSP4                        78
+
+/* axi periphrals */
+#define PXA168_CLK_DFC                 100
+#define PXA168_CLK_SDH0                        101
+#define PXA168_CLK_SDH1                        102
+#define PXA168_CLK_SDH2                        103
+#define PXA168_CLK_USB                 104
+#define PXA168_CLK_SPH                 105
+#define PXA168_CLK_DISP0               106
+#define PXA168_CLK_CCIC0               107
+#define PXA168_CLK_CCIC0_PHY           108
+#define PXA168_CLK_CCIC0_SPHY          109
+
+#define PXA168_NR_CLKS                 200
+#endif
diff --git a/include/dt-bindings/clock/marvell,pxa910.h b/include/dt-bindings/clock/marvell,pxa910.h
new file mode 100644 (file)
index 0000000..719cffb
--- /dev/null
@@ -0,0 +1,54 @@
+#ifndef __DTS_MARVELL_PXA910_CLOCK_H
+#define __DTS_MARVELL_PXA910_CLOCK_H
+
+/* fixed clocks and plls */
+#define PXA910_CLK_CLK32               1
+#define PXA910_CLK_VCTCXO              2
+#define PXA910_CLK_PLL1                        3
+#define PXA910_CLK_PLL1_2              8
+#define PXA910_CLK_PLL1_4              9
+#define PXA910_CLK_PLL1_8              10
+#define PXA910_CLK_PLL1_16             11
+#define PXA910_CLK_PLL1_6              12
+#define PXA910_CLK_PLL1_12             13
+#define PXA910_CLK_PLL1_24             14
+#define PXA910_CLK_PLL1_48             15
+#define PXA910_CLK_PLL1_96             16
+#define PXA910_CLK_PLL1_13             17
+#define PXA910_CLK_PLL1_13_1_5         18
+#define PXA910_CLK_PLL1_2_1_5          19
+#define PXA910_CLK_PLL1_3_16           20
+#define PXA910_CLK_UART_PLL            27
+
+/* apb periphrals */
+#define PXA910_CLK_TWSI0               60
+#define PXA910_CLK_TWSI1               61
+#define PXA910_CLK_TWSI2               62
+#define PXA910_CLK_TWSI3               63
+#define PXA910_CLK_GPIO                        64
+#define PXA910_CLK_KPC                 65
+#define PXA910_CLK_RTC                 66
+#define PXA910_CLK_PWM0                        67
+#define PXA910_CLK_PWM1                        68
+#define PXA910_CLK_PWM2                        69
+#define PXA910_CLK_PWM3                        70
+#define PXA910_CLK_UART0               71
+#define PXA910_CLK_UART1               72
+#define PXA910_CLK_UART2               73
+#define PXA910_CLK_SSP0                        74
+#define PXA910_CLK_SSP1                        75
+
+/* axi periphrals */
+#define PXA910_CLK_DFC                 100
+#define PXA910_CLK_SDH0                        101
+#define PXA910_CLK_SDH1                        102
+#define PXA910_CLK_SDH2                        103
+#define PXA910_CLK_USB                 104
+#define PXA910_CLK_SPH                 105
+#define PXA910_CLK_DISP0               106
+#define PXA910_CLK_CCIC0               107
+#define PXA910_CLK_CCIC0_PHY           108
+#define PXA910_CLK_CCIC0_SPHY          109
+
+#define PXA910_NR_CLKS                 200
+#endif
index 100a08c47692c38c01726482ef26ec10b801278d..f60ce72a2b2c76c23da6fa52c7eb470d39195633 100644 (file)
 #define SCLK_HDMI_CEC          110
 #define SCLK_HEVC_CABAC                111
 #define SCLK_HEVC_CORE         112
+#define SCLK_I2S0_OUT          113
+#define SCLK_SDMMC_DRV         114
+#define SCLK_SDIO0_DRV         115
+#define SCLK_SDIO1_DRV         116
+#define SCLK_EMMC_DRV          117
+#define SCLK_SDMMC_SAMPLE      118
+#define SCLK_SDIO0_SAMPLE      119
+#define SCLK_SDIO1_SAMPLE      120
+#define SCLK_EMMC_SAMPLE       121
 
 #define DCLK_VOP0              190
 #define DCLK_VOP1              191
 #define PCLK_VIO2_H2P          361
 #define PCLK_CPU               362
 #define PCLK_PERI              363
+#define PCLK_DDRUPCTL0         364
+#define PCLK_PUBL0             365
+#define PCLK_DDRUPCTL1         366
+#define PCLK_PUBL1             367
 
 /* hclk gates */
 #define HCLK_GPS               448
index 59822a9958581dc5a1871ec3bc02c756656a836e..b5e6b0069ac7703b927a0c040ffcbd764c1afd89 100644 (file)
@@ -11,7 +11,7 @@
 #define _DT_BINDINGS_THERMAL_THERMAL_H
 
 /* On cooling devices upper and lower limits */
-#define THERMAL_NO_LIMIT               (-1UL)
+#define THERMAL_NO_LIMIT               (~0)
 
 #endif
 
index 6bff83b1f298d479122097beb9fe4ca3bf73bab0..856d381b1d5b83ce923be0f5d7da80ba420c1d14 100644 (file)
@@ -153,6 +153,7 @@ int acpi_unmap_lsapic(int cpu);
 
 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
 int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base);
+int acpi_ioapic_registered(acpi_handle handle, u32 gsi_base);
 void acpi_irq_stats_init(void);
 extern u32 acpi_irq_handled;
 extern u32 acpi_irq_not_handled;
index 0c04917c2f1297f0a013e32fe6e3cf380b967d3a..af84234e1f6e2f3741ccb42f1a89e325f17749e8 100644 (file)
@@ -47,6 +47,7 @@ struct sk_buff;
 
 struct audit_krule {
        int                     vers_ops;
+       u32                     pflags;
        u32                     flags;
        u32                     listnr;
        u32                     action;
@@ -64,6 +65,9 @@ struct audit_krule {
        u64                     prio;
 };
 
+/* Flag to indicate legacy AUDIT_LOGINUID unset usage */
+#define AUDIT_LOGINUID_LEGACY          0x1
+
 struct audit_field {
        u32                             type;
        union {
index 2839c639f0920942d1e835dd464598432e22e974..d936409520f8db609994f7ddab629a99981883dc 100644 (file)
@@ -176,7 +176,7 @@ struct clk_ops {
                                        unsigned long *parent_rate);
        long            (*determine_rate)(struct clk_hw *hw, unsigned long rate,
                                        unsigned long *best_parent_rate,
-                                       struct clk **best_parent_clk);
+                                       struct clk_hw **best_parent_hw);
        int             (*set_parent)(struct clk_hw *hw, u8 index);
        u8              (*get_parent)(struct clk_hw *hw);
        int             (*set_rate)(struct clk_hw *hw, unsigned long rate,
@@ -544,16 +544,14 @@ u8 __clk_get_num_parents(struct clk *clk);
 struct clk *__clk_get_parent(struct clk *clk);
 struct clk *clk_get_parent_by_index(struct clk *clk, u8 index);
 unsigned int __clk_get_enable_count(struct clk *clk);
-unsigned int __clk_get_prepare_count(struct clk *clk);
 unsigned long __clk_get_rate(struct clk *clk);
-unsigned long __clk_get_accuracy(struct clk *clk);
 unsigned long __clk_get_flags(struct clk *clk);
 bool __clk_is_prepared(struct clk *clk);
 bool __clk_is_enabled(struct clk *clk);
 struct clk *__clk_lookup(const char *name);
 long __clk_mux_determine_rate(struct clk_hw *hw, unsigned long rate,
                              unsigned long *best_parent_rate,
-                             struct clk **best_parent_p);
+                             struct clk_hw **best_parent_p);
 
 /*
  * FIXME clock api without lock protection
@@ -652,7 +650,7 @@ static inline void clk_writel(u32 val, u32 __iomem *reg)
 #endif /* platform dependent I/O accessors */
 
 #ifdef CONFIG_DEBUG_FS
-struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode,
+struct dentry *clk_debugfs_add_file(struct clk_hw *hw, char *name, umode_t mode,
                                void *data, const struct file_operations *fops);
 #endif
 
index 74e5341463c91d06c1d5578d93f722358e340929..55ef529a0dbf905995781bd051bf926396b837c5 100644 (file)
@@ -264,7 +264,7 @@ int omap3_noncore_dpll_set_rate_and_parent(struct clk_hw *hw,
 long omap3_noncore_dpll_determine_rate(struct clk_hw *hw,
                                       unsigned long rate,
                                       unsigned long *best_parent_rate,
-                                      struct clk **best_parent_clk);
+                                      struct clk_hw **best_parent_clk);
 unsigned long omap4_dpll_regm4xen_recalc(struct clk_hw *hw,
                                         unsigned long parent_rate);
 long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
@@ -273,7 +273,7 @@ long omap4_dpll_regm4xen_round_rate(struct clk_hw *hw,
 long omap4_dpll_regm4xen_determine_rate(struct clk_hw *hw,
                                        unsigned long rate,
                                        unsigned long *best_parent_rate,
-                                       struct clk **best_parent_clk);
+                                       struct clk_hw **best_parent_clk);
 u8 omap2_init_dpll_parent(struct clk_hw *hw);
 unsigned long omap3_dpll_recalc(struct clk_hw *hw, unsigned long parent_rate);
 long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
index d5ad7b1118fc10748377d9a90d2e960e0f7b611a..a1c81f80978ee4b38bbbb1cdd781c7afa0362c5e 100644 (file)
@@ -186,6 +186,80 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
 # define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
 #endif
 
+#include <uapi/linux/types.h>
+
+static __always_inline void data_access_exceeds_word_size(void)
+#ifdef __compiletime_warning
+__compiletime_warning("data access exceeds word size and won't be atomic")
+#endif
+;
+
+static __always_inline void data_access_exceeds_word_size(void)
+{
+}
+
+static __always_inline void __read_once_size(volatile void *p, void *res, int size)
+{
+       switch (size) {
+       case 1: *(__u8 *)res = *(volatile __u8 *)p; break;
+       case 2: *(__u16 *)res = *(volatile __u16 *)p; break;
+       case 4: *(__u32 *)res = *(volatile __u32 *)p; break;
+#ifdef CONFIG_64BIT
+       case 8: *(__u64 *)res = *(volatile __u64 *)p; break;
+#endif
+       default:
+               barrier();
+               __builtin_memcpy((void *)res, (const void *)p, size);
+               data_access_exceeds_word_size();
+               barrier();
+       }
+}
+
+static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
+{
+       switch (size) {
+       case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
+       case 2: *(volatile __u16 *)p = *(__u16 *)res; break;
+       case 4: *(volatile __u32 *)p = *(__u32 *)res; break;
+#ifdef CONFIG_64BIT
+       case 8: *(volatile __u64 *)p = *(__u64 *)res; break;
+#endif
+       default:
+               barrier();
+               __builtin_memcpy((void *)p, (const void *)res, size);
+               data_access_exceeds_word_size();
+               barrier();
+       }
+}
+
+/*
+ * Prevent the compiler from merging or refetching reads or writes. The
+ * compiler is also forbidden from reordering successive instances of
+ * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
+ * compiler is aware of some particular ordering.  One way to make the
+ * compiler aware of ordering is to put the two invocations of READ_ONCE,
+ * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
+ *
+ * In contrast to ACCESS_ONCE these two macros will also work on aggregate
+ * data types like structs or unions. If the size of the accessed data
+ * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
+ * READ_ONCE() and ASSIGN_ONCE()  will fall back to memcpy and print a
+ * compile-time warning.
+ *
+ * Their two major use cases are: (1) Mediating communication between
+ * process-level code and irq/NMI handlers, all running on the same CPU,
+ * and (2) Ensuring that the compiler does not  fold, spindle, or otherwise
+ * mutilate accesses that either do not require ordering or that interact
+ * with an explicit memory barrier or atomic instruction that provides the
+ * required ordering.
+ */
+
+#define READ_ONCE(x) \
+       ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
+
+#define ASSIGN_ONCE(val, x) \
+       ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
+
 #endif /* __KERNEL__ */
 
 #endif /* __ASSEMBLY__ */
index c303d383def1146589a30c980d2c8dadd1bd89cc..bd955270d5aae60f77cc8f936a5820d9690ac6ac 100644 (file)
@@ -50,7 +50,7 @@ static inline struct thermal_cooling_device *
 of_cpufreq_cooling_register(struct device_node *np,
                            const struct cpumask *clip_cpus)
 {
-       return NULL;
+       return ERR_PTR(-ENOSYS);
 }
 #endif
 
@@ -65,13 +65,13 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
 static inline struct thermal_cooling_device *
 cpufreq_cooling_register(const struct cpumask *clip_cpus)
 {
-       return NULL;
+       return ERR_PTR(-ENOSYS);
 }
 static inline struct thermal_cooling_device *
 of_cpufreq_cooling_register(struct device_node *np,
                            const struct cpumask *clip_cpus)
 {
-       return NULL;
+       return ERR_PTR(-ENOSYS);
 }
 static inline
 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
index a07e087f54b205741da6c126d0ca2144257140af..ab70f3bc44ad7a2c1ddf2454dac51f7a80e95f41 100644 (file)
@@ -53,7 +53,6 @@ struct cpuidle_state {
 };
 
 /* Idle State Flags */
-#define CPUIDLE_FLAG_TIME_INVALID      (0x01) /* is residency time measurable? */
 #define CPUIDLE_FLAG_COUPLED   (0x02) /* state applies to multiple cpus */
 #define CPUIDLE_FLAG_TIMER_STOP (0x04)  /* timer is stopped on this state */
 
@@ -89,8 +88,6 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
 /**
  * cpuidle_get_last_residency - retrieves the last state's residency time
  * @dev: the target CPU
- *
- * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set
  */
 static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
 {
index f1863dcd83eaf4070cc83762ce9864b9826e1c0e..ce447f0f1bad49e24351cec2c1ae0e2d2620ad4a 100644 (file)
@@ -188,7 +188,7 @@ extern struct devfreq *devm_devfreq_add_device(struct device *dev,
 extern void devm_devfreq_remove_device(struct device *dev,
                                  struct devfreq *devfreq);
 
-/* Supposed to be called by PM_SLEEP/PM_RUNTIME callbacks */
+/* Supposed to be called by PM callbacks */
 extern int devfreq_suspend_device(struct devfreq *devfreq);
 extern int devfreq_resume_device(struct devfreq *devfreq);
 
index b9376cd5a187e818c28c09935eb022a1920848a8..25a822f6f0009f97c47d9f5fb0d27bed0e79fcd5 100644 (file)
@@ -68,6 +68,7 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
  * Number of interrupts per specific IRQ source, since bootup
  */
 extern unsigned int kstat_irqs(unsigned int irq);
+extern unsigned int kstat_irqs_usr(unsigned int irq);
 
 /*
  * Number of interrupts per cpu, since bootup
index 01aad3ed89ecd10d1ad2c6415451871b86524528..fab9b32ace8e2c07cfac5a51f97be8dd4bd1ae96 100644 (file)
@@ -36,9 +36,6 @@ extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
 
 extern int migrate_prep(void);
 extern int migrate_prep_local(void);
-extern int migrate_vmas(struct mm_struct *mm,
-               const nodemask_t *from, const nodemask_t *to,
-               unsigned long flags);
 extern void migrate_page_copy(struct page *newpage, struct page *page);
 extern int migrate_huge_page_move_mapping(struct address_space *mapping,
                                  struct page *newpage, struct page *page);
@@ -57,13 +54,6 @@ static inline int migrate_pages(struct list_head *l, new_page_t new,
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
 
-static inline int migrate_vmas(struct mm_struct *mm,
-               const nodemask_t *from, const nodemask_t *to,
-               unsigned long flags)
-{
-       return -ENOSYS;
-}
-
 static inline void migrate_page_copy(struct page *newpage,
                                     struct page *page) {}
 
index c0a67b894c4ce5338e845774b3c4e3ad8c325f5f..f80d0194c9bc2fa67b73eadbf93ac65e62434000 100644 (file)
@@ -286,8 +286,6 @@ struct vm_operations_struct {
         */
        struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
                                        unsigned long addr);
-       int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
-               const nodemask_t *to, unsigned long flags);
 #endif
        /* called by sys_remap_file_pages() to populate non-linear mapping */
        int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
index 7ea069cd32579caacc5953802356a62237ac0413..4b3736f7065c496601011b9474368238f9af923a 100644 (file)
@@ -251,7 +251,7 @@ pgoff_t page_cache_prev_hole(struct address_space *mapping,
 #define FGP_NOWAIT             0x00000020
 
 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
-               int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
+               int fgp_flags, gfp_t cache_gfp_mask);
 
 /**
  * find_get_page - find and get a page reference
@@ -266,13 +266,13 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
 static inline struct page *find_get_page(struct address_space *mapping,
                                        pgoff_t offset)
 {
-       return pagecache_get_page(mapping, offset, 0, 0, 0);
+       return pagecache_get_page(mapping, offset, 0, 0);
 }
 
 static inline struct page *find_get_page_flags(struct address_space *mapping,
                                        pgoff_t offset, int fgp_flags)
 {
-       return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
+       return pagecache_get_page(mapping, offset, fgp_flags, 0);
 }
 
 /**
@@ -292,7 +292,7 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
 static inline struct page *find_lock_page(struct address_space *mapping,
                                        pgoff_t offset)
 {
-       return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
+       return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
 }
 
 /**
@@ -319,7 +319,7 @@ static inline struct page *find_or_create_page(struct address_space *mapping,
 {
        return pagecache_get_page(mapping, offset,
                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
-                                       gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
+                                       gfp_mask);
 }
 
 /**
@@ -340,8 +340,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
 {
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
-                       mapping_gfp_mask(mapping),
-                       GFP_NOFS);
+                       mapping_gfp_mask(mapping));
 }
 
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
index 44a27696ab6c788d0f16fe678daa63353b9d55aa..360a966a97a5807f7ff13441748c0c93850ff7e1 100644 (file)
@@ -349,6 +349,7 @@ struct pci_dev {
        unsigned int    __aer_firmware_first:1;
        unsigned int    broken_intx_masking:1;
        unsigned int    io_window_1k:1; /* Intel P2P bridge 1K I/O windows */
+       unsigned int    irq_managed:1;
        pci_dev_flags_t dev_flags;
        atomic_t        enable_cnt;     /* pci_enable_device has been called */
 
index 6cd20d5e651b9d751b9555000a6417d10368dee2..a9edab2c787a53e809150034128bc46448a010ad 100644 (file)
@@ -271,6 +271,8 @@ typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
 int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
                        void *data);
 void of_genpd_del_provider(struct device_node *np);
+struct generic_pm_domain *of_genpd_get_from_provider(
+                       struct of_phandle_args *genpdspec);
 
 struct generic_pm_domain *__of_genpd_xlate_simple(
                                        struct of_phandle_args *genpdspec,
@@ -288,6 +290,12 @@ static inline int __of_genpd_add_provider(struct device_node *np,
 }
 static inline void of_genpd_del_provider(struct device_node *np) {}
 
+static inline struct generic_pm_domain *of_genpd_get_from_provider(
+                       struct of_phandle_args *genpdspec)
+{
+       return NULL;
+}
+
 #define __of_genpd_xlate_simple                NULL
 #define __of_genpd_xlate_onecell       NULL
 
index c611a02fbc51246c9ea71e81eb9ccd6a4030d763..fc52e307efab8768effbb7880702986653e0a07c 100644 (file)
@@ -38,7 +38,7 @@
 #define THERMAL_CSTATE_INVALID -1UL
 
 /* No upper/lower limit requirement */
-#define THERMAL_NO_LIMIT       THERMAL_CSTATE_INVALID
+#define THERMAL_NO_LIMIT       ((u32)~0)
 
 /* Unit conversion macros */
 #define KELVIN_TO_CELSIUS(t)   (long)(((long)t-2732 >= 0) ?    \
index a41e252396c0a4d58910858db78a7c6e5e800973..1c5e453f7ea997364a4d26852a2b532ec3b8acd5 100644 (file)
@@ -101,6 +101,11 @@ static inline size_t iov_iter_count(struct iov_iter *i)
        return i->count;
 }
 
+static inline bool iter_is_iovec(struct iov_iter *i)
+{
+       return !(i->type & (ITER_BVEC | ITER_KVEC));
+}
+
 /*
  * Cap the iov_iter by given limit; note that the second argument is
  * *not* the new size - it's upper limit for such.  Passing it a value
index 9d87a37aecad20f1a2aeaaa9cddb929a589d3440..dae99d7d2bc05a265faddb5a4f953d34f99e4ef1 100644 (file)
@@ -688,7 +688,6 @@ extern int sas_queuecommand(struct Scsi_Host * ,struct scsi_cmnd *);
 extern int sas_target_alloc(struct scsi_target *);
 extern int sas_slave_configure(struct scsi_device *);
 extern int sas_change_queue_depth(struct scsi_device *, int new_depth);
-extern int sas_change_queue_type(struct scsi_device *, int qt);
 extern int sas_bios_param(struct scsi_device *,
                          struct block_device *,
                          sector_t capacity, int *hsc);
index e939d2b3757a40cb47c5f5eaf9b3b1b881b14006..019e66858ce64a3cc2e6e990cd26e52669e8a2bb 100644 (file)
@@ -277,19 +277,6 @@ struct scsi_host_template {
         */
        int (* change_queue_depth)(struct scsi_device *, int);
 
-       /*
-        * Fill in this function to allow the changing of tag types
-        * (this also allows the enabling/disabling of tag command
-        * queueing).  An error should only be returned if something
-        * went wrong in the driver while trying to set the tag type.
-        * If the driver doesn't support the requested tag type, then
-        * it should set the closest type it does support without
-        * returning an error.  Returns the actual tag type set.
-        *
-        * Status: OPTIONAL
-        */
-       int (* change_queue_type)(struct scsi_device *, int);
-
        /*
         * This function determines the BIOS parameters for a given
         * harddisk.  These tend to be numbers that are made up by
index fe4a70299419cedc7160b08942bce7367517f3ab..9708b28bd2aa1bf1ca53a044eefef563b4a5b8fc 100644 (file)
@@ -6,46 +6,10 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 
-#define MSG_SIMPLE_TAG 0x20
-#define MSG_HEAD_TAG   0x21
-#define MSG_ORDERED_TAG        0x22
-#define MSG_ACA_TAG    0x24    /* unsupported */
-
 #define SCSI_NO_TAG    (-1)    /* identify no tag in use */
 
 
 #ifdef CONFIG_BLOCK
-
-int scsi_change_queue_type(struct scsi_device *sdev, int tag_type);
-
-/**
- * scsi_get_tag_type - get the type of tag the device supports
- * @sdev:      the scsi device
- */
-static inline int scsi_get_tag_type(struct scsi_device *sdev)
-{
-       if (!sdev->tagged_supported)
-               return 0;
-       if (sdev->simple_tags)
-               return MSG_SIMPLE_TAG;
-       return 0;
-}
-
-static inline void scsi_set_tag_type(struct scsi_device *sdev, int tag)
-{
-       switch (tag) {
-       case MSG_ORDERED_TAG:
-       case MSG_SIMPLE_TAG:
-               sdev->simple_tags = 1;
-               break;
-       case 0:
-               /* fall through */
-       default:
-               sdev->simple_tags = 0;
-               break;
-       }
-}
-
 static inline struct scsi_cmnd *scsi_mq_find_tag(struct Scsi_Host *shost,
                                                 int unique_tag)
 {
index 9adc1bca1178ba36482f63bf8cef1444045f7709..430cfaf92285f177d977d11717599bf1ff85b70b 100644 (file)
@@ -5,6 +5,15 @@
 #define TRANSPORT_PLUGIN_VHBA_PDEV             2
 #define TRANSPORT_PLUGIN_VHBA_VDEV             3
 
+struct target_backend_cits {
+       struct config_item_type tb_dev_cit;
+       struct config_item_type tb_dev_attrib_cit;
+       struct config_item_type tb_dev_pr_cit;
+       struct config_item_type tb_dev_wwn_cit;
+       struct config_item_type tb_dev_alua_tg_pt_gps_cit;
+       struct config_item_type tb_dev_stat_cit;
+};
+
 struct se_subsystem_api {
        struct list_head sub_api_list;
 
@@ -44,6 +53,8 @@ struct se_subsystem_api {
        int (*init_prot)(struct se_device *);
        int (*format_prot)(struct se_device *);
        void (*free_prot)(struct se_device *);
+
+       struct target_backend_cits tb_cits;
 };
 
 struct sbc_ops {
@@ -96,4 +107,36 @@ sense_reason_t      transport_generic_map_mem_to_cmd(struct se_cmd *,
 
 void   array_free(void *array, int n);
 
+/* From target_core_configfs.c to setup default backend config_item_types */
+void   target_core_setup_sub_cits(struct se_subsystem_api *);
+
+/* attribute helpers from target_core_device.c for backend drivers */
+int    se_dev_set_max_unmap_lba_count(struct se_device *, u32);
+int    se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
+int    se_dev_set_unmap_granularity(struct se_device *, u32);
+int    se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+int    se_dev_set_max_write_same_len(struct se_device *, u32);
+int    se_dev_set_emulate_model_alias(struct se_device *, int);
+int    se_dev_set_emulate_dpo(struct se_device *, int);
+int    se_dev_set_emulate_fua_write(struct se_device *, int);
+int    se_dev_set_emulate_fua_read(struct se_device *, int);
+int    se_dev_set_emulate_write_cache(struct se_device *, int);
+int    se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
+int    se_dev_set_emulate_tas(struct se_device *, int);
+int    se_dev_set_emulate_tpu(struct se_device *, int);
+int    se_dev_set_emulate_tpws(struct se_device *, int);
+int    se_dev_set_emulate_caw(struct se_device *, int);
+int    se_dev_set_emulate_3pc(struct se_device *, int);
+int    se_dev_set_pi_prot_type(struct se_device *, int);
+int    se_dev_set_pi_prot_format(struct se_device *, int);
+int    se_dev_set_enforce_pr_isids(struct se_device *, int);
+int    se_dev_set_force_pr_aptpl(struct se_device *, int);
+int    se_dev_set_is_nonrot(struct se_device *, int);
+int    se_dev_set_emulate_rest_reord(struct se_device *dev, int);
+int    se_dev_set_queue_depth(struct se_device *, u32);
+int    se_dev_set_max_sectors(struct se_device *, u32);
+int    se_dev_set_fabric_max_sectors(struct se_device *, u32);
+int    se_dev_set_optimal_sectors(struct se_device *, u32);
+int    se_dev_set_block_size(struct se_device *, u32);
+
 #endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h
new file mode 100644 (file)
index 0000000..3247d75
--- /dev/null
@@ -0,0 +1,120 @@
+#ifndef TARGET_CORE_BACKEND_CONFIGFS_H
+#define TARGET_CORE_BACKEND_CONFIGFS_H
+
+#include <target/configfs_macros.h>
+
+#define DEF_TB_DEV_ATTRIB_SHOW(_backend, _name)                                \
+static ssize_t _backend##_dev_show_attr_##_name(                       \
+       struct se_dev_attrib *da,                                       \
+       char *page)                                                     \
+{                                                                      \
+       return snprintf(page, PAGE_SIZE, "%u\n",                        \
+                       (u32)da->da_dev->dev_attrib._name);             \
+}
+
+#define DEF_TB_DEV_ATTRIB_STORE(_backend, _name)                       \
+static ssize_t _backend##_dev_store_attr_##_name(                      \
+       struct se_dev_attrib *da,                                       \
+       const char *page,                                               \
+       size_t count)                                                   \
+{                                                                      \
+       unsigned long val;                                              \
+       int ret;                                                        \
+                                                                       \
+       ret = kstrtoul(page, 0, &val);                                  \
+       if (ret < 0) {                                                  \
+               pr_err("kstrtoul() failed with ret: %d\n", ret);        \
+               return -EINVAL;                                         \
+       }                                                               \
+       ret = se_dev_set_##_name(da->da_dev, (u32)val);                 \
+                                                                       \
+       return (!ret) ? count : -EINVAL;                                \
+}
+
+#define DEF_TB_DEV_ATTRIB(_backend, _name)                             \
+DEF_TB_DEV_ATTRIB_SHOW(_backend, _name);                               \
+DEF_TB_DEV_ATTRIB_STORE(_backend, _name);
+
+#define DEF_TB_DEV_ATTRIB_RO(_backend, name)                           \
+DEF_TB_DEV_ATTRIB_SHOW(_backend, name);
+
+CONFIGFS_EATTR_STRUCT(target_backend_dev_attrib, se_dev_attrib);
+#define TB_DEV_ATTR(_backend, _name, _mode)                            \
+static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
+               __CONFIGFS_EATTR(_name, _mode,                          \
+               _backend##_dev_show_attr_##_name,                       \
+               _backend##_dev_store_attr_##_name);
+
+#define TB_DEV_ATTR_RO(_backend, _name)                                                \
+static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name = \
+       __CONFIGFS_EATTR_RO(_name,                                      \
+       _backend##_dev_show_attr_##_name);
+
+/*
+ * Default list of target backend device attributes as defined by
+ * struct se_dev_attrib
+ */
+
+#define DEF_TB_DEFAULT_ATTRIBS(_backend)                               \
+       DEF_TB_DEV_ATTRIB(_backend, emulate_model_alias);               \
+       TB_DEV_ATTR(_backend, emulate_model_alias, S_IRUGO | S_IWUSR);  \
+       DEF_TB_DEV_ATTRIB(_backend, emulate_dpo);                       \
+       TB_DEV_ATTR(_backend, emulate_dpo, S_IRUGO | S_IWUSR);          \
+       DEF_TB_DEV_ATTRIB(_backend, emulate_fua_write);                 \
+       TB_DEV_ATTR(_backend, emulate_fua_write, S_IRUGO | S_IWUSR);    \
+       DEF_TB_DEV_ATTRIB(_backend, emulate_fua_read);                  \
+       TB_DEV_ATTR(_backend, emulate_fua_read, S_IRUGO | S_IWUSR);     \
+       DEF_TB_DEV_ATTRIB(_backend, emulate_write_cache);               \
+       TB_DEV_ATTR(_backend, emulate_write_cache, S_IRUGO | S_IWUSR);  \
+       DEF_TB_DEV_ATTRIB(_backend, emulate_ua_intlck_ctrl);            \
+       TB_DEV_ATTR(_backend, emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR); \
+       DEF_TB_DEV_ATTRIB(_backend, emulate_tas);                       \
+       TB_DEV_ATTR(_backend, emulate_tas, S_IRUGO | S_IWUSR);          \
+       DEF_TB_DEV_ATTRIB(_backend, emulate_tpu);                       \
+       TB_DEV_ATTR(_backend, emulate_tpu, S_IRUGO | S_IWUSR);          \
+       DEF_TB_DEV_ATTRIB(_backend, emulate_tpws);                      \
+       TB_DEV_ATTR(_backend, emulate_tpws, S_IRUGO | S_IWUSR);         \
+       DEF_TB_DEV_ATTRIB(_backend, emulate_caw);                       \
+       TB_DEV_ATTR(_backend, emulate_caw, S_IRUGO | S_IWUSR);          \
+       DEF_TB_DEV_ATTRIB(_backend, emulate_3pc);                       \
+       TB_DEV_ATTR(_backend, emulate_3pc, S_IRUGO | S_IWUSR);          \
+       DEF_TB_DEV_ATTRIB(_backend, pi_prot_type);                      \
+       TB_DEV_ATTR(_backend, pi_prot_type, S_IRUGO | S_IWUSR);         \
+       DEF_TB_DEV_ATTRIB_RO(_backend, hw_pi_prot_type);                \
+       TB_DEV_ATTR_RO(_backend, hw_pi_prot_type);                      \
+       DEF_TB_DEV_ATTRIB(_backend, pi_prot_format);                    \
+       TB_DEV_ATTR(_backend, pi_prot_format, S_IRUGO | S_IWUSR);       \
+       DEF_TB_DEV_ATTRIB(_backend, enforce_pr_isids);                  \
+       TB_DEV_ATTR(_backend, enforce_pr_isids, S_IRUGO | S_IWUSR);     \
+       DEF_TB_DEV_ATTRIB(_backend, is_nonrot);                         \
+       TB_DEV_ATTR(_backend, is_nonrot, S_IRUGO | S_IWUSR);            \
+       DEF_TB_DEV_ATTRIB(_backend, emulate_rest_reord);                \
+       TB_DEV_ATTR(_backend, emulate_rest_reord, S_IRUGO | S_IWUSR);   \
+       DEF_TB_DEV_ATTRIB(_backend, force_pr_aptpl);                    \
+       TB_DEV_ATTR(_backend, force_pr_aptpl, S_IRUGO | S_IWUSR);       \
+       DEF_TB_DEV_ATTRIB_RO(_backend, hw_block_size);                  \
+       TB_DEV_ATTR_RO(_backend, hw_block_size);                        \
+       DEF_TB_DEV_ATTRIB(_backend, block_size);                        \
+       TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR);           \
+       DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors);                 \
+       TB_DEV_ATTR_RO(_backend, hw_max_sectors);                       \
+       DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors);                \
+       TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR);   \
+       DEF_TB_DEV_ATTRIB(_backend, optimal_sectors);                   \
+       TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR);      \
+       DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth);                 \
+       TB_DEV_ATTR_RO(_backend, hw_queue_depth);                       \
+       DEF_TB_DEV_ATTRIB(_backend, queue_depth);                       \
+       TB_DEV_ATTR(_backend, queue_depth, S_IRUGO | S_IWUSR);          \
+       DEF_TB_DEV_ATTRIB(_backend, max_unmap_lba_count);               \
+       TB_DEV_ATTR(_backend, max_unmap_lba_count, S_IRUGO | S_IWUSR);  \
+       DEF_TB_DEV_ATTRIB(_backend, max_unmap_block_desc_count);        \
+       TB_DEV_ATTR(_backend, max_unmap_block_desc_count, S_IRUGO | S_IWUSR); \
+       DEF_TB_DEV_ATTRIB(_backend, unmap_granularity);                 \
+       TB_DEV_ATTR(_backend, unmap_granularity, S_IRUGO | S_IWUSR);    \
+       DEF_TB_DEV_ATTRIB(_backend, unmap_granularity_alignment);       \
+       TB_DEV_ATTR(_backend, unmap_granularity_alignment, S_IRUGO | S_IWUSR); \
+       DEF_TB_DEV_ATTRIB(_backend, max_write_same_len);                \
+       TB_DEV_ATTR(_backend, max_write_same_len, S_IRUGO | S_IWUSR);
+
+#endif /* TARGET_CORE_BACKEND_CONFIGFS_H */
index 23c518a0340c095f46fc94742bf8a033798f07fc..397fb635766a96faa94c5b91788ad24fca0d2a34 100644 (file)
@@ -476,6 +476,12 @@ struct se_dif_v1_tuple {
        __be32                  ref_tag;
 };
 
+/* for sam_task_attr */
+#define TCM_SIMPLE_TAG 0x20
+#define TCM_HEAD_TAG   0x21
+#define TCM_ORDERED_TAG        0x22
+#define TCM_ACA_TAG    0x24
+
 struct se_cmd {
        /* SAM response code being sent to initiator */
        u8                      scsi_status;
index 45403443dd82ed3ae3ae07813db72bda164c0de5..04c3c6efdcc22d1a1a787d94b3686440521b9ff9 100644 (file)
 
 #define show_task_attribute_name(val)                          \
        __print_symbolic(val,                                   \
-               { MSG_SIMPLE_TAG,       "SIMPLE"        },      \
-               { MSG_HEAD_TAG,         "HEAD"          },      \
-               { MSG_ORDERED_TAG,      "ORDERED"       },      \
-               { MSG_ACA_TAG,          "ACA"           } )
+               { TCM_SIMPLE_TAG,       "SIMPLE"        },      \
+               { TCM_HEAD_TAG,         "HEAD"          },      \
+               { TCM_ORDERED_TAG,      "ORDERED"       },      \
+               { TCM_ACA_TAG,          "ACA"           } )
 
 #define show_scsi_status_name(val)                             \
        __print_symbolic(val,                                   \
index 12e26683c7061c80e9e6f5f0ba3dbc1a73b32862..d3475e1f15ec193977fa7c7e577127c26e785863 100644 (file)
@@ -371,7 +371,9 @@ enum {
 #define AUDIT_ARCH_PARISC      (EM_PARISC)
 #define AUDIT_ARCH_PARISC64    (EM_PARISC|__AUDIT_ARCH_64BIT)
 #define AUDIT_ARCH_PPC         (EM_PPC)
+/* do not define AUDIT_ARCH_PPCLE since it is not supported by audit */
 #define AUDIT_ARCH_PPC64       (EM_PPC64|__AUDIT_ARCH_64BIT)
+#define AUDIT_ARCH_PPC64LE     (EM_PPC64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
 #define AUDIT_ARCH_S390                (EM_S390)
 #define AUDIT_ARCH_S390X       (EM_S390|__AUDIT_ARCH_64BIT)
 #define AUDIT_ARCH_SH          (EM_SH)
index 7dcfbe6771b1f4a6683b3d816eb0be5f76aa7837..b483d1909d3e6baee31be68e5e905aca4a15572c 100644 (file)
@@ -6,10 +6,6 @@
 #include <linux/types.h>
 #include <linux/uio.h>
 
-#ifndef __packed
-#define __packed                        __attribute__((packed))
-#endif
-
 #define TCMU_VERSION "1.0"
 
 /*
index 9b3565c41502af7bd9198f9a378c3a5cc7562423..eb410083e8e075f9ca1829d0db1bf3cb70d17139 100644 (file)
@@ -395,8 +395,6 @@ retry:
                        case 0:
                                goto out;
                        case -EACCES:
-                               flags |= MS_RDONLY;
-                               goto retry;
                        case -EINVAL:
                                continue;
                }
@@ -419,6 +417,10 @@ retry:
 #endif
                panic("VFS: Unable to mount root fs on %s", b);
        }
+       if (!(flags & MS_RDONLY)) {
+               flags |= MS_RDONLY;
+               goto retry;
+       }
 
        printk("List of all partitions:\n");
        printk_all_partitions();
index aba9d9fadf0c20a50b1b6bec895371244e4a1efd..72ab759a0b43a6400750cefa71650ed64e7a8222 100644 (file)
@@ -429,7 +429,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
  * This function doesn't consume an skb as might be expected since it has to
  * copy it anyways.
  */
-static void kauditd_send_multicast_skb(struct sk_buff *skb)
+static void kauditd_send_multicast_skb(struct sk_buff *skb, gfp_t gfp_mask)
 {
        struct sk_buff          *copy;
        struct audit_net        *aunet = net_generic(&init_net, audit_net_id);
@@ -448,11 +448,11 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb)
         * no reason for new multicast clients to continue with this
         * non-compliance.
         */
-       copy = skb_copy(skb, GFP_KERNEL);
+       copy = skb_copy(skb, gfp_mask);
        if (!copy)
                return;
 
-       nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL);
+       nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, gfp_mask);
 }
 
 /*
@@ -1940,7 +1940,7 @@ void audit_log_end(struct audit_buffer *ab)
                struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
 
                nlh->nlmsg_len = ab->skb->len;
-               kauditd_send_multicast_skb(ab->skb);
+               kauditd_send_multicast_skb(ab->skb, ab->gfp_mask);
 
                /*
                 * The original kaudit unicast socket sends up messages with
index 3598e13f2a655dbbcfb0df6b6c1bd1afcebd2b8f..4f68a326d92e65883425c3f16f01e78ae55bc255 100644 (file)
@@ -442,19 +442,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
                        f->type = AUDIT_LOGINUID_SET;
                        f->val = 0;
-               }
-
-               if ((f->type == AUDIT_PID) || (f->type == AUDIT_PPID)) {
-                       struct pid *pid;
-                       rcu_read_lock();
-                       pid = find_vpid(f->val);
-                       if (!pid) {
-                               rcu_read_unlock();
-                               err = -ESRCH;
-                               goto exit_free;
-                       }
-                       f->val = pid_nr(pid);
-                       rcu_read_unlock();
+                       entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
                }
 
                err = audit_field_valid(entry, f);
@@ -630,6 +618,13 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
                        data->buflen += data->values[i] =
                                audit_pack_string(&bufp, krule->filterkey);
                        break;
+               case AUDIT_LOGINUID_SET:
+                       if (krule->pflags & AUDIT_LOGINUID_LEGACY && !f->val) {
+                               data->fields[i] = AUDIT_LOGINUID;
+                               data->values[i] = AUDIT_UID_UNSET;
+                               break;
+                       }
+                       /* fallthrough if set */
                default:
                        data->values[i] = f->val;
                }
@@ -646,6 +641,7 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
        int i;
 
        if (a->flags != b->flags ||
+           a->pflags != b->pflags ||
            a->listnr != b->listnr ||
            a->action != b->action ||
            a->field_count != b->field_count)
@@ -764,6 +760,7 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old)
        new = &entry->rule;
        new->vers_ops = old->vers_ops;
        new->flags = old->flags;
+       new->pflags = old->pflags;
        new->listnr = old->listnr;
        new->action = old->action;
        for (i = 0; i < AUDIT_BITMASK_SIZE; i++)
index c75522a83678d14d542f0e1b25a13180762803ca..37c69ab561dad881c9fd76eaac1ed5b48e5bab52 100644 (file)
@@ -1877,12 +1877,18 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
        }
 
 out_alloc:
-       /* unable to find the name from a previous getname(). Allocate a new
-        * anonymous entry.
-        */
-       n = audit_alloc_name(context, AUDIT_TYPE_NORMAL);
+       /* unable to find an entry with both a matching name and type */
+       n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN);
        if (!n)
                return;
+       if (name)
+               /* since name is not NULL we know there is already a matching
+                * name record, see audit_getname(), so there must be a type
+                * mismatch; reuse the string path since the original name
+                * record will keep the string valid until we free it in
+                * audit_free_names() */
+               n->name = name;
+
 out:
        if (parent) {
                n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL;
index 113b837470cd65e32038396e7c9fa5af821db03d..4c1ee7f2bebc4bfb1434fe472f0d66f120f8cdc0 100644 (file)
@@ -7477,11 +7477,11 @@ SYSCALL_DEFINE5(perf_event_open,
 
        if (move_group) {
                synchronize_rcu();
-               perf_install_in_context(ctx, group_leader, event->cpu);
+               perf_install_in_context(ctx, group_leader, group_leader->cpu);
                get_ctx(ctx);
                list_for_each_entry(sibling, &group_leader->sibling_list,
                                    group_entry) {
-                       perf_install_in_context(ctx, sibling, event->cpu);
+                       perf_install_in_context(ctx, sibling, sibling->cpu);
                        get_ctx(ctx);
                }
        }
index 4332d766619d1c700c600ec0678bc6c3ca47a6fa..df553b0af936be2aa8f5ee5e1968da0c25e88384 100644 (file)
@@ -78,8 +78,12 @@ extern void unmask_threaded_irq(struct irq_desc *desc);
 
 #ifdef CONFIG_SPARSE_IRQ
 static inline void irq_mark_irq(unsigned int irq) { }
+extern void irq_lock_sparse(void);
+extern void irq_unlock_sparse(void);
 #else
 extern void irq_mark_irq(unsigned int irq);
+static inline void irq_lock_sparse(void) { }
+static inline void irq_unlock_sparse(void) { }
 #endif
 
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
index a1782f88f0af3049164be1962e4cfeeb1c776b5c..99793b9b6d23a5bfb4c1b4c2794134b06cdc5132 100644 (file)
@@ -132,6 +132,16 @@ static void free_masks(struct irq_desc *desc)
 static inline void free_masks(struct irq_desc *desc) { }
 #endif
 
+void irq_lock_sparse(void)
+{
+       mutex_lock(&sparse_irq_lock);
+}
+
+void irq_unlock_sparse(void)
+{
+       mutex_unlock(&sparse_irq_lock);
+}
+
 static struct irq_desc *alloc_desc(int irq, int node, struct module *owner)
 {
        struct irq_desc *desc;
@@ -168,6 +178,12 @@ static void free_desc(unsigned int irq)
 
        unregister_irq_proc(irq, desc);
 
+       /*
+        * sparse_irq_lock also protects show_interrupts() and
+        * kstat_irqs_usr(). Once we have deleted the descriptor from
+        * the sparse tree we can free it. Lookups of the descriptor
+        * from proc will then fail.
+        */
        mutex_lock(&sparse_irq_lock);
        delete_irq_desc(irq);
        mutex_unlock(&sparse_irq_lock);
@@ -574,6 +590,15 @@ void kstat_incr_irq_this_cpu(unsigned int irq)
        kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 }
 
+/**
+ * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
+ * @irq:       The interrupt number
+ * @cpu:       The cpu number
+ *
+ * Returns the sum of interrupt counts on @cpu since boot for
+ * @irq. The caller must ensure that the interrupt is not removed
+ * concurrently.
+ */
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
        struct irq_desc *desc = irq_to_desc(irq);
@@ -582,6 +607,14 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
                        *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
 }
 
+/**
+ * kstat_irqs - Get the statistics for an interrupt
+ * @irq:       The interrupt number
+ *
+ * Returns the sum of interrupt counts on all cpus since boot for
+ * @irq. The caller must ensure that the interrupt is not removed
+ * concurrently.
+ */
 unsigned int kstat_irqs(unsigned int irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
@@ -594,3 +627,22 @@ unsigned int kstat_irqs(unsigned int irq)
                sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
        return sum;
 }
+
+/**
+ * kstat_irqs_usr - Get the statistics for an interrupt
+ * @irq:       The interrupt number
+ *
+ * Returns the sum of interrupt counts on all cpus since boot for
+ * @irq. Contrary to kstat_irqs() this can be called from any
+ * preemptible context. It's protected against concurrent removal of
+ * an interrupt descriptor when sparse irqs are enabled.
+ */
+unsigned int kstat_irqs_usr(unsigned int irq)
+{
+       int sum;
+
+       irq_lock_sparse();
+       sum = kstat_irqs(irq);
+       irq_unlock_sparse();
+       return sum;
+}
index ac1ba2f110321fe637b2f5538c0ac568dbafc10e..9dc9bfd8a6785902a8573f5dae1d63f4a06c51bc 100644 (file)
 
 #include "internals.h"
 
+/*
+ * Access rules:
+ *
+ * procfs protects read/write of /proc/irq/N/ files against a
+ * concurrent free of the interrupt descriptor. remove_proc_entry()
+ * immediately prevents new read/writes from happening and waits for
+ * already running read/write functions to complete.
+ *
+ * We remove the proc entries first and then delete the interrupt
+ * descriptor from the radix tree and free it. So it is guaranteed
+ * that irq_to_desc(N) is valid as long as the read/writes are
+ * permitted by procfs.
+ *
+ * The read from /proc/interrupts is a different problem because there
+ * is no protection. So the lookup and the access to irqdesc
+ * information must be protected by sparse_irq_lock.
+ */
 static struct proc_dir_entry *root_irq_dir;
 
 #ifdef CONFIG_SMP
@@ -437,9 +454,10 @@ int show_interrupts(struct seq_file *p, void *v)
                seq_putc(p, '\n');
        }
 
+       irq_lock_sparse();
        desc = irq_to_desc(i);
        if (!desc)
-               return 0;
+               goto outsparse;
 
        raw_spin_lock_irqsave(&desc->lock, flags);
        for_each_online_cpu(j)
@@ -479,6 +497,8 @@ int show_interrupts(struct seq_file *p, void *v)
        seq_putc(p, '\n');
 out:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
+outsparse:
+       irq_unlock_sparse();
        return 0;
 }
 #endif
index 6e7708c2c21f9c7a4e3142a8100bf0aa3852a264..48b28d387c7f77b2e3d36bc751b3e221c7634d67 100644 (file)
@@ -94,7 +94,7 @@ config PM_STD_PARTITION
 config PM_SLEEP
        def_bool y
        depends on SUSPEND || HIBERNATE_CALLBACKS
-       select PM_RUNTIME
+       select PM
 
 config PM_SLEEP_SMP
        def_bool y
@@ -130,23 +130,19 @@ config PM_WAKELOCKS_GC
        depends on PM_WAKELOCKS
        default y
 
-config PM_RUNTIME
-       bool "Run-time PM core functionality"
+config PM
+       bool "Device power management core functionality"
        ---help---
          Enable functionality allowing I/O devices to be put into energy-saving
-         (low power) states at run time (or autosuspended) after a specified
-         period of inactivity and woken up in response to a hardware-generated
+         (low power) states, for example after a specified period of inactivity
+         (autosuspended), and woken up in response to a hardware-generated
          wake-up event or a driver's request.
 
          Hardware support is generally required for this functionality to work
          and the bus type drivers of the buses the devices are on are
-         responsible for the actual handling of the autosuspend requests and
+         responsible for the actual handling of device suspend requests and
          wake-up events.
 
-config PM
-       def_bool y
-       depends on PM_SLEEP || PM_RUNTIME
-
 config PM_DEBUG
        bool "Power Management Debug Support"
        depends on PM
index 4d54b7540585a83c41a2dc0da34b1f9e7e9077d7..1363d58f07e976475ffc583dbf16c81c120b1a16 100644 (file)
@@ -847,7 +847,6 @@ void tick_nohz_idle_enter(void)
 
        local_irq_enable();
 }
-EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
 
 /**
  * tick_nohz_irq_exit - update next tick event from interrupt exit
@@ -974,7 +973,6 @@ void tick_nohz_idle_exit(void)
 
        local_irq_enable();
 }
-EXPORT_SYMBOL_GPL(tick_nohz_idle_exit);
 
 static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
 {
index e8905bc3cbd7c546dd9c1e586b61abac9d38cc8f..673e4581a2e541b44b02cd8ef201772dad5311a1 100644 (file)
@@ -1046,8 +1046,7 @@ EXPORT_SYMBOL(find_lock_entry);
  * @mapping: the address_space to search
  * @offset: the page index
  * @fgp_flags: PCG flags
- * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
- * @radix_gfp_mask: gfp mask to use for radix tree node allocation
+ * @gfp_mask: gfp mask to use for the page cache data page allocation
  *
  * Looks up the page cache slot at @mapping & @offset.
  *
@@ -1056,11 +1055,9 @@ EXPORT_SYMBOL(find_lock_entry);
  * FGP_ACCESSED: the page will be marked accessed
  * FGP_LOCK: Page is return locked
  * FGP_CREAT: If page is not present then a new page is allocated using
- *             @cache_gfp_mask and added to the page cache and the VM's LRU
- *             list. If radix tree nodes are allocated during page cache
- *             insertion then @radix_gfp_mask is used. The page is returned
- *             locked and with an increased refcount. Otherwise, %NULL is
- *             returned.
+ *             @gfp_mask and added to the page cache and the VM's LRU
+ *             list. The page is returned locked and with an increased
+ *             refcount. Otherwise, %NULL is returned.
  *
  * If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
  * if the GFP flags specified for FGP_CREAT are atomic.
@@ -1068,7 +1065,7 @@ EXPORT_SYMBOL(find_lock_entry);
  * If there is a page cache page, it is returned with an increased refcount.
  */
 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
-       int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
+       int fgp_flags, gfp_t gfp_mask)
 {
        struct page *page;
 
@@ -1105,13 +1102,11 @@ no_page:
        if (!page && (fgp_flags & FGP_CREAT)) {
                int err;
                if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
-                       cache_gfp_mask |= __GFP_WRITE;
-               if (fgp_flags & FGP_NOFS) {
-                       cache_gfp_mask &= ~__GFP_FS;
-                       radix_gfp_mask &= ~__GFP_FS;
-               }
+                       gfp_mask |= __GFP_WRITE;
+               if (fgp_flags & FGP_NOFS)
+                       gfp_mask &= ~__GFP_FS;
 
-               page = __page_cache_alloc(cache_gfp_mask);
+               page = __page_cache_alloc(gfp_mask);
                if (!page)
                        return NULL;
 
@@ -1122,7 +1117,8 @@ no_page:
                if (fgp_flags & FGP_ACCESSED)
                        __SetPageReferenced(page);
 
-               err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
+               err = add_to_page_cache_lru(page, mapping, offset,
+                               gfp_mask & GFP_RECLAIM_MASK);
                if (unlikely(err)) {
                        page_cache_release(page);
                        page = NULL;
@@ -2443,8 +2439,7 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
                fgp_flags |= FGP_NOFS;
 
        page = pagecache_get_page(mapping, index, fgp_flags,
-                       mapping_gfp_mask(mapping),
-                       GFP_KERNEL);
+                       mapping_gfp_mask(mapping));
        if (page)
                wait_for_stable_page(page);
 
@@ -2464,7 +2459,7 @@ ssize_t generic_perform_write(struct file *file,
        /*
         * Copies from kernel address space cannot fail (NFSD is a big user).
         */
-       if (segment_eq(get_fs(), KERNEL_DS))
+       if (!iter_is_iovec(i))
                flags |= AOP_FLAG_UNINTERRUPTIBLE;
 
        do {
index 0ca1df9075ab7ff3cbe24c0ff06ec19a65701a93..a900759cc8075fc8b0da9a37ebf6f93de34d8d10 100644 (file)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -968,7 +968,7 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
 
        pudp = pud_offset(&pgd, addr);
        do {
-               pud_t pud = ACCESS_ONCE(*pudp);
+               pud_t pud = READ_ONCE(*pudp);
 
                next = pud_addr_end(addr, end);
                if (pud_none(pud))
index d8aebc52265f59e75e342051fa604c5f1003f59b..ca920d1fd314a17c7250d7916bd37403afa96b79 100644 (file)
@@ -2378,12 +2378,12 @@ void unmap_mapping_range(struct address_space *mapping,
                details.last_index = ULONG_MAX;
 
 
-       i_mmap_lock_read(mapping);
+       i_mmap_lock_write(mapping);
        if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
                unmap_mapping_range_tree(&mapping->i_mmap, &details);
        if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
                unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
-       i_mmap_unlock_read(mapping);
+       i_mmap_unlock_write(mapping);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
@@ -3195,7 +3195,16 @@ static int handle_pte_fault(struct mm_struct *mm,
        pte_t entry;
        spinlock_t *ptl;
 
-       entry = ACCESS_ONCE(*pte);
+       /*
+        * some architectures can have larger ptes than wordsize,
+        * e.g. ppc44x-defconfig has CONFIG_PTE_64BIT=y and CONFIG_32BIT=y,
+        * so READ_ONCE or ACCESS_ONCE cannot guarantee atomic accesses.
+        * The code below just needs a consistent view for the ifs and
+        * we later double check anyway with the ptl lock held. So here
+        * a barrier will do.
+        */
+       entry = *pte;
+       barrier();
        if (!pte_present(entry)) {
                if (pte_none(entry)) {
                        if (vma->vm_ops) {
index f22c55947181d87e997b0cadd76c57f36d5a83f8..0e0961b8c39ceb18a7eca485753d1a74c905d353 100644 (file)
@@ -1041,10 +1041,6 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 
        down_read(&mm->mmap_sem);
 
-       err = migrate_vmas(mm, from, to, flags);
-       if (err)
-               goto out;
-
        /*
         * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
         * bit in 'to' is not also set in 'tmp'.  Clear the found 'source'
@@ -1124,7 +1120,6 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                if (err < 0)
                        break;
        }
-out:
        up_read(&mm->mmap_sem);
        if (err < 0)
                return err;
index b1d02127e1be8d9fcf3aac91e6943f90e0e0fa1e..344cdf692fc8060b20022a559dd222fded2a6e12 100644 (file)
@@ -1536,27 +1536,6 @@ out:
        return err;
 }
 
-/*
- * Call migration functions in the vma_ops that may prepare
- * memory in a vm for migration. migration functions may perform
- * the migration for vmas that do not have an underlying page struct.
- */
-int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
-       const nodemask_t *from, unsigned long flags)
-{
-       struct vm_area_struct *vma;
-       int err = 0;
-
-       for (vma = mm->mmap; vma && !err; vma = vma->vm_next) {
-               if (vma->vm_ops && vma->vm_ops->migrate) {
-                       err = vma->vm_ops->migrate(vma, to, from, flags);
-                       if (err)
-                               break;
-               }
-       }
-       return err;
-}
-
 #ifdef CONFIG_NUMA_BALANCING
 /*
  * Returns true if this is a safe migration target node for misplaced NUMA
index 45ba250babd86674f0320e0a19dc9cf5916fc864..c5bc241127b205734eaef62964d6d152941174cc 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -583,7 +583,8 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
         * without holding anon_vma lock for write.  So when looking for a
         * genuine pmde (in which to find pte), test present and !THP together.
         */
-       pmde = ACCESS_ONCE(*pmd);
+       pmde = *pmd;
+       barrier();
        if (!pmd_present(pmde) || pmd_trans_huge(pmde))
                pmd = NULL;
 out:
index 185836ba53ef6e23a63b6a0de7c22ebae19eac0b..73ba1df7c8ba1bcf17f0ef2ee0930c13db56730e 100644 (file)
@@ -1536,7 +1536,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
         * holes of a sparse file, we actually need to allocate those pages,
         * and even mark them dirty, so it cannot exceed the max_blocks limit.
         */
-       if (segment_eq(get_fs(), KERNEL_DS))
+       if (!iter_is_iovec(to))
                sgp = SGP_DIRTY;
 
        index = *ppos >> PAGE_CACHE_SHIFT;
index 70bbde65e4cab3a1e7288257d8d2164a905ba9ee..a2c33a4dc7bab45e520556557924b62d24951d85 100644 (file)
@@ -372,7 +372,6 @@ struct file *sock_alloc_file(struct socket *sock, int flags, const char *dname)
        path.mnt = mntget(sock_mnt);
 
        d_instantiate(path.dentry, SOCK_INODE(sock));
-       SOCK_INODE(sock)->i_fop = &socket_file_ops;
 
        file = alloc_file(&path, FMODE_READ | FMODE_WRITE,
                  &socket_file_ops);
index 5374b1bdf02f8793ad0a84c677a73e6cfa08b773..edd2794569db96a052579b3700b30ac9335510a4 100644 (file)
@@ -185,6 +185,18 @@ modbuiltin := -f $(srctree)/scripts/Makefile.modbuiltin obj
 # $(Q)$(MAKE) $(dtbinst)=dir
 dtbinst := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.dtbinst obj
 
+###
+# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.clean obj=
+# Usage:
+# $(Q)$(MAKE) $(clean)=dir
+clean := -f $(srctree)/scripts/Makefile.clean obj
+
+###
+# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.headersinst obj=
+# Usage:
+# $(Q)$(MAKE) $(hdr-inst)=dir
+hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
+
 # Prefix -I with $(srctree) if it is not an absolute path.
 # skip if -I has no parameter
 addtree = $(if $(patsubst -I%,%,$(1)), \
index b1c668dc68150b4780d0c5ff0c69cec5278c2daf..1bca180db8ad0b6475464f3ca8ea78c881c1b1c5 100644 (file)
@@ -7,10 +7,7 @@ src := $(obj)
 PHONY := __clean
 __clean:
 
-# Shorthand for $(Q)$(MAKE) scripts/Makefile.clean obj=dir
-# Usage:
-# $(Q)$(MAKE) $(clean)=dir
-clean := -f $(srctree)/scripts/Makefile.clean obj
+include scripts/Kbuild.include
 
 # The filename Kbuild has precedence over Makefile
 kbuild-dir := $(if $(filter /%,$(src)),$(src),$(srctree)/$(src))
@@ -91,11 +88,6 @@ PHONY += $(subdir-ymn)
 $(subdir-ymn):
        $(Q)$(MAKE) $(clean)=$@
 
-# If quiet is set, only print short version of command
-
-cmd = @$(if $($(quiet)cmd_$(1)),echo '  $($(quiet)cmd_$(1))' &&) $(cmd_$(1))
-
-
 # Declare the contents of the .PHONY variable as phony.  We keep that
 # information in a variable se we can use it in if_changed and friends.
 
index 8ccf83056a7ab6b260a028a9ee9a422e6949525e..1106d6ca3a384baa81b077b599d83b8e9941108e 100644 (file)
@@ -122,7 +122,6 @@ $(check-file): scripts/headers_check.pl $(output-files) FORCE
 endif
 
 # Recursion
-hdr-inst := -rR -f $(srctree)/scripts/Makefile.headersinst obj
 .PHONY: $(subdirs)
 $(subdirs):
        $(Q)$(MAKE) $(hdr-inst)=$(obj)/$@ dst=$(_dst)/$@
index 556456ca761c0035b314b579c9c38d5b38c3b62c..3b7eec24fb5a2a7cf6b55c4f003e363591888164 100644 (file)
@@ -8,7 +8,7 @@
 // Confidence: High
 // Copyright: (C) 2014 Himangi Saraogi.  GPLv2.
 // Comments:
-// Options: --no-includes, --include-headers
+// Options: --no-includes --include-headers
 
 virtual patch
 virtual context
index 95ece06599a58b78f5f4e3955c33acecda34af10..d4dc4de5cea1f9134d810d2bdc3614187187571d 100755 (executable)
@@ -19,8 +19,6 @@ for arch in ${archs}; do
        case ${arch} in
        um)        # no userspace export
                ;;
-       cris)      # headers export are known broken
-               ;;
        *)
                if [ -d ${srctree}/arch/${arch} ]; then
                        do_command $1 ${arch}
index 14cea7463a621b33c6c0b8487781db107d764841..4dd37552abc2cb5c4ba4afd6b3a70c6c3dfebdfe 100644 (file)
@@ -330,10 +330,10 @@ static void set_subtitle(void)
        list_for_each_entry(sp, &trail, entries) {
                if (sp->text) {
                        if (pos) {
-                               pos->next = xcalloc(sizeof(*pos), 1);
+                               pos->next = xcalloc(1, sizeof(*pos));
                                pos = pos->next;
                        } else {
-                               subtitles = pos = xcalloc(sizeof(*pos), 1);
+                               subtitles = pos = xcalloc(1, sizeof(*pos));
                        }
                        pos->text = sp->text;
                }
index a26cc5d2a9b0217d9c3d52bf0dab21b7337e95d2..72c9dba84c5dbd46cb8ae2824a85d54603dbaafe 100644 (file)
@@ -548,7 +548,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
 {
        int i, j;
        struct menu *submenu[8], *menu, *location = NULL;
-       struct jump_key *jump;
+       struct jump_key *jump = NULL;
 
        str_printf(r, _("Prompt: %s\n"), _(prop->text));
        menu = prop->menu->parent;
@@ -586,7 +586,7 @@ static void get_prompt_str(struct gstr *r, struct property *prop,
                str_printf(r, _("  Location:\n"));
                for (j = 4; --i >= 0; j += 2) {
                        menu = submenu[i];
-                       if (head && location && menu == location)
+                       if (jump && menu == location)
                                jump->offset = strlen(r->s);
                        str_printf(r, "%*c-> %s", j, ' ',
                                   _(menu_get_prompt(menu)));
index 13957602f7ca5eb190170450b79ff877cd0cb3af..d9ab94b17de0bc119a6fbf3958886d1735085fdf 100755 (executable)
@@ -117,6 +117,7 @@ echo 'mv vmlinux.bz2 $RPM_BUILD_ROOT'"/boot/vmlinux-$KERNELRELEASE.bz2"
 echo 'mv vmlinux.orig vmlinux'
 echo "%endif"
 
+if ! $PREBUILT; then
 echo 'rm -f $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE/{build,source}"
 echo "mkdir -p "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNELRELEASE"
 echo "EXCLUDES=\"$RCS_TAR_IGNORE --exclude .tmp_versions --exclude=*vmlinux* --exclude=*.o --exclude=*.ko --exclude=*.cmd --exclude=Documentation --exclude=firmware --exclude .config.old --exclude .missing-syscalls.d\""
@@ -124,6 +125,7 @@ echo "tar "'$EXCLUDES'" -cf- . | (cd "'$RPM_BUILD_ROOT'"/usr/src/kernels/$KERNEL
 echo 'cd $RPM_BUILD_ROOT'"/lib/modules/$KERNELRELEASE"
 echo "ln -sf /usr/src/kernels/$KERNELRELEASE build"
 echo "ln -sf /usr/src/kernels/$KERNELRELEASE source"
+fi
 
 echo ""
 echo "%clean"
@@ -151,9 +153,11 @@ echo "%files headers"
 echo '%defattr (-, root, root)'
 echo "/usr/include"
 echo ""
+if ! $PREBUILT; then
 echo "%files devel"
 echo '%defattr (-, root, root)'
 echo "/usr/src/kernels/$KERNELRELEASE"
 echo "/lib/modules/$KERNELRELEASE/build"
 echo "/lib/modules/$KERNELRELEASE/source"
 echo ""
+fi
index 9bc556b15a92b337d040f65f777a952415e66a9f..67ade0775a5b21b45329ef54db02a5a812b966ae 100644 (file)
@@ -19,7 +19,7 @@ static int hw_rule_rate(struct snd_pcm_hw_params *params,
                .min = UINT_MAX, .max = 0, .integer = 1
        };
        struct snd_oxfw_stream_formation formation;
-       unsigned int i, err;
+       int i, err;
 
        for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; i++) {
                if (formats[i] == NULL)
@@ -47,7 +47,7 @@ static int hw_rule_channels(struct snd_pcm_hw_params *params,
        const struct snd_interval *r =
                hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
        struct snd_oxfw_stream_formation formation;
-       unsigned int i, j, err;
+       int i, j, err;
        unsigned int count, list[SND_OXFW_STREAM_FORMAT_ENTRIES] = {0};
 
        count = 0;
@@ -80,7 +80,7 @@ static int hw_rule_channels(struct snd_pcm_hw_params *params,
 static void limit_channels_and_rates(struct snd_pcm_hardware *hw, u8 **formats)
 {
        struct snd_oxfw_stream_formation formation;
-       unsigned int i, err;
+       int i, err;
 
        hw->channels_min = UINT_MAX;
        hw->channels_max = 0;
index 604808e5526d4d4fc72b7118c22ccf5c413f8df7..8ba4f9f262b87f3a302f696a544809cdfb6dc5ed 100644 (file)
@@ -15,7 +15,7 @@ static void proc_read_formation(struct snd_info_entry *entry,
        struct snd_oxfw_stream_formation formation, curr;
        u8 *format;
        char flag;
-       unsigned int i, err;
+       int i, err;
 
        /* Show input. */
        err = snd_oxfw_stream_get_current_formation(oxfw,
index b77cf80f1678438fb51d7a871255c7c814ae4dbd..bda845afb470703ff0c05bd69f1caac5bfeabbd6 100644 (file)
@@ -61,7 +61,8 @@ static int set_stream_format(struct snd_oxfw *oxfw, struct amdtp_stream *s,
        u8 **formats;
        struct snd_oxfw_stream_formation formation;
        enum avc_general_plug_dir dir;
-       unsigned int i, err, len;
+       unsigned int len;
+       int i, err;
 
        if (s == &oxfw->tx_stream) {
                formats = oxfw->tx_stream_formats;
index cf1d0b55e827236992a0d073c427cbc0e620e4b0..60e5cad0531aeb181d4cc6558f44b4cd6896f070 100644 (file)
@@ -43,7 +43,7 @@ static bool detect_loud_models(struct fw_unit *unit)
        err = fw_csr_string(unit->directory, CSR_MODEL,
                            model, sizeof(model));
        if (err < 0)
-               return err;
+               return false;
 
        for (i = 0; i < ARRAY_SIZE(models); i++) {
                if (strcmp(models[i], model) == 0)
index 48380ce2c81bc2033ec7f6e263de36e2c5568ccc..aeea679b2281ca261a288fdaaca934d75bd3a20d 100644 (file)
@@ -1367,9 +1367,9 @@ struct hpi_control_cache_single {
 struct hpi_control_cache_pad {
        struct hpi_control_cache_info i;
        u32 field_valid_flags;
-       u8 c_channel[8];
-       u8 c_artist[40];
-       u8 c_title[40];
+       u8 c_channel[40];
+       u8 c_artist[100];
+       u8 c_title[100];
        u8 c_comment[200];
        u32 pTY;
        u32 pI;
index e9146e53bd502c2ce29320ae24e67e8d96c0d5dc..6623ab11003814fe8ebafcee1f08f4184370a46a 100644 (file)
@@ -11,13 +11,13 @@ Production releases have even minor version.
 /* Use single digits for versions less that 10 to avoid octal. */
 /* *** HPI_VER is the only edit required to update version *** */
 /** HPI version */
-#define HPI_VER HPI_VERSION_CONSTRUCTOR(4, 10, 1)
+#define HPI_VER HPI_VERSION_CONSTRUCTOR(4, 14, 3)
 
 /** HPI version string in dotted decimal format */
-#define HPI_VER_STRING "4.10.01"
+#define HPI_VER_STRING "4.14.03"
 
 /** Library version as documented in hpi-api-versions.txt */
-#define HPI_LIB_VER  HPI_VERSION_CONSTRUCTOR(10, 2, 0)
+#define HPI_LIB_VER  HPI_VERSION_CONSTRUCTOR(10, 4, 0)
 
 /** Construct hpi version number from major, minor, release numbers */
 #define HPI_VERSION_CONSTRUCTOR(maj, min, r) ((maj << 16) + (min << 8) + r)
index ac9163770013a8adeab493ff4585f70899afd6c0..3603c24f34d2d9a632223480fd5ec0d06023dd8b 100644 (file)
@@ -1,8 +1,9 @@
-/***********************************************************************/
-/**
+/***********************************************************************
 
     AudioScience HPI driver
-    Copyright (C) 1997-2011  AudioScience Inc. <support@audioscience.com>
+    Functions for reading DSP code using hotplug firmware loader
+
+    Copyright (C) 1997-2014  AudioScience Inc. <support@audioscience.com>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of version 2 of the GNU General Public License as
     along with this program; if not, write to the Free Software
     Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 
-\file
-Functions for reading DSP code using
-hotplug firmware loader from individual dsp code files
-*/
-/***********************************************************************/
+***********************************************************************/
 #define SOURCEFILE_NAME "hpidspcd.c"
 #include "hpidspcd.h"
 #include "hpidebug.h"
@@ -68,17 +65,18 @@ short hpi_dsp_code_open(u32 adapter, void *os_data, struct dsp_code *dsp_code,
                goto error2;
        }
 
-       if ((header.version >> 9) != (HPI_VER >> 9)) {
-               /* Consider even and subsequent odd minor versions to be compatible */
-               dev_err(&dev->dev, "Incompatible firmware version DSP image %X != Driver %X\n",
+       if (HPI_VER_MAJOR(header.version) != HPI_VER_MAJOR(HPI_VER)) {
+               /* Major version change probably means Host-DSP protocol change */
+               dev_err(&dev->dev,
+                       "Incompatible firmware version DSP image %X != Driver %X\n",
                        header.version, HPI_VER);
                goto error2;
        }
 
        if (header.version != HPI_VER) {
-               dev_info(&dev->dev,
-                        "Firmware: release version mismatch  DSP image %X != Driver %X\n",
-                        header.version, HPI_VER);
+               dev_warn(&dev->dev,
+                       "Firmware version mismatch: DSP image %X != Driver %X\n",
+                       header.version, HPI_VER);
        }
 
        HPI_DEBUG_LOG(DEBUG, "dsp code %s opened\n", fw_name);
index 8276a743e22ef2e1c234dcc291106fa627858b14..0cfc9c8c4b4e811ac6f0b6bbeb0d24377c95232d 100644 (file)
@@ -1922,10 +1922,18 @@ int azx_mixer_create(struct azx *chip)
 EXPORT_SYMBOL_GPL(azx_mixer_create);
 
 
+static bool is_input_stream(struct azx *chip, unsigned char index)
+{
+       return (index >= chip->capture_index_offset &&
+               index < chip->capture_index_offset + chip->capture_streams);
+}
+
 /* initialize SD streams */
 int azx_init_stream(struct azx *chip)
 {
        int i;
+       int in_stream_tag = 0;
+       int out_stream_tag = 0;
 
        /* initialize each stream (aka device)
         * assign the starting bdl address to each stream (device)
@@ -1938,9 +1946,21 @@ int azx_init_stream(struct azx *chip)
                azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
                /* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
                azx_dev->sd_int_sta_mask = 1 << i;
-               /* stream tag: must be non-zero and unique */
                azx_dev->index = i;
-               azx_dev->stream_tag = i + 1;
+
+               /* stream tag must be unique throughout
+                * the stream direction group,
+                * valid values 1...15
+                * use separate stream tag if the flag
+                * AZX_DCAPS_SEPARATE_STREAM_TAG is used
+                */
+               if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
+                       azx_dev->stream_tag =
+                               is_input_stream(chip, i) ?
+                               ++in_stream_tag :
+                               ++out_stream_tag;
+               else
+                       azx_dev->stream_tag = i + 1;
        }
 
        return 0;
index 63b69f750d8e24289c906d0c3c9eac0278ce1722..b680b4ec63313c8b1152390dbbf602018a212952 100644 (file)
@@ -3218,12 +3218,13 @@ static int create_input_ctls(struct hda_codec *codec)
        }
 
        /* add stereo mix when explicitly enabled via hint */
-       if (mixer && spec->add_stereo_mix_input &&
-           snd_hda_get_bool_hint(codec, "add_stereo_mix_input") > 0) {
+       if (mixer && spec->add_stereo_mix_input == HDA_HINT_STEREO_MIX_ENABLE) {
                err = parse_capture_source(codec, mixer, CFG_IDX_MIX, num_adcs,
                                           "Stereo Mix", 0);
                if (err < 0)
                        return err;
+               else
+                       spec->suppress_auto_mic = 1;
        }
 
        return 0;
@@ -4542,9 +4543,8 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
 
        /* add stereo mix if available and not enabled yet */
        if (!spec->auto_mic && spec->mixer_nid &&
-           spec->add_stereo_mix_input &&
-           spec->input_mux.num_items > 1 &&
-           snd_hda_get_bool_hint(codec, "add_stereo_mix_input") < 0) {
+           spec->add_stereo_mix_input == HDA_HINT_STEREO_MIX_AUTO &&
+           spec->input_mux.num_items > 1) {
                err = parse_capture_source(codec, spec->mixer_nid,
                                           CFG_IDX_MIX, spec->num_all_adcs,
                                           "Stereo Mix", 0);
index 61dd5153f512b99a26d44305dc2e17b723322077..3d852660443aafedf7450b39692b70aa1d59921a 100644 (file)
@@ -222,7 +222,7 @@ struct hda_gen_spec {
        unsigned int vmaster_mute_enum:1; /* add vmaster mute mode enum */
        unsigned int indep_hp:1; /* independent HP supported */
        unsigned int prefer_hp_amp:1; /* enable HP amp for speaker if any */
-       unsigned int add_stereo_mix_input:1; /* add aamix as a capture src */
+       unsigned int add_stereo_mix_input:2; /* add aamix as a capture src */
        unsigned int add_jack_modes:1; /* add i/o jack mode enum ctls */
        unsigned int power_down_unused:1; /* power down unused widgets */
        unsigned int dac_min_mute:1; /* minimal = mute for DACs */
@@ -291,6 +291,13 @@ struct hda_gen_spec {
                                    struct hda_jack_callback *cb);
 };
 
+/* values for add_stereo_mix_input flag */
+enum {
+       HDA_HINT_STEREO_MIX_DISABLE,    /* No stereo mix input */
+       HDA_HINT_STEREO_MIX_ENABLE,     /* Add stereo mix input */
+       HDA_HINT_STEREO_MIX_AUTO,       /* Add only if auto-mic is disabled */
+};
+
 int snd_hda_gen_spec_init(struct hda_gen_spec *spec);
 
 int snd_hda_gen_init(struct hda_codec *codec);
index 2bf0b568e3de40bfcaebd77914371f6800c125da..d426a0bd6a5f7e86adf482c83ad86e3203a8a3ad 100644 (file)
@@ -299,6 +299,9 @@ enum {
         AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_POWERWELL |\
         AZX_DCAPS_SNOOP_TYPE(SCH))
 
+#define AZX_DCAPS_INTEL_SKYLAKE \
+       (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG)
+
 /* quirks for ATI SB / AMD Hudson */
 #define AZX_DCAPS_PRESET_ATI_SB \
        (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\
@@ -2027,7 +2030,7 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
        /* Sunrise Point-LP */
        { PCI_DEVICE(0x8086, 0x9d70),
-         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+         .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
        /* Haswell */
        { PCI_DEVICE(0x8086, 0x0a0c),
          .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
index aa484fdf43389d476742aea16ee95a03318028ae..166e3e84b963875eec1d854e9ef8e2d434df0edb 100644 (file)
@@ -171,6 +171,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
 #define AZX_DCAPS_I915_POWERWELL (1 << 27)     /* HSW i915 powerwell support */
 #define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)  /* CORBRP clears itself after reset */
 #define AZX_DCAPS_NO_MSI64      (1 << 29)      /* Stick to 32-bit MSIs */
+#define AZX_DCAPS_SEPARATE_STREAM_TAG  (1 << 30) /* capture and playback use separate stream tag */
 
 enum {
        AZX_SNOOP_TYPE_NONE ,
index bef721592c3a9c08ecf908b871d555a6ca836187..ccc962a1699f1b160dc3ad074496d003e173919a 100644 (file)
@@ -468,7 +468,7 @@ int snd_hda_get_bool_hint(struct hda_codec *codec, const char *key)
 EXPORT_SYMBOL_GPL(snd_hda_get_bool_hint);
 
 /**
- * snd_hda_get_bool_hint - Get a boolean hint value
+ * snd_hda_get_int_hint - Get an integer hint value
  * @codec: the HDA codec
  * @key: the hint key string
  * @valp: pointer to store a value
index c81b715d6c985f2002daaa3da4567ac100257f8a..a9d78e275138573b1248a71aeb7236a69b879974 100644 (file)
@@ -195,7 +195,8 @@ static int ad198x_parse_auto_config(struct hda_codec *codec, bool indep_hp)
        codec->no_sticky_stream = 1;
 
        spec->gen.indep_hp = indep_hp;
-       spec->gen.add_stereo_mix_input = 1;
+       if (!spec->gen.add_stereo_mix_input)
+               spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
 
        err = snd_hda_parse_pin_defcfg(codec, cfg, NULL, 0);
        if (err < 0)
@@ -256,6 +257,18 @@ static void ad1986a_fixup_eapd(struct hda_codec *codec,
        }
 }
 
+/* enable stereo-mix input for avoiding regression on KDE (bko#88251) */
+static void ad1986a_fixup_eapd_mix_in(struct hda_codec *codec,
+                                     const struct hda_fixup *fix, int action)
+{
+       struct ad198x_spec *spec = codec->spec;
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               ad1986a_fixup_eapd(codec, fix, action);
+               spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_ENABLE;
+       }
+}
+
 enum {
        AD1986A_FIXUP_INV_JACK_DETECT,
        AD1986A_FIXUP_ULTRA,
@@ -264,6 +277,8 @@ enum {
        AD1986A_FIXUP_LAPTOP,
        AD1986A_FIXUP_LAPTOP_IMIC,
        AD1986A_FIXUP_EAPD,
+       AD1986A_FIXUP_EAPD_MIX_IN,
+       AD1986A_FIXUP_EASYNOTE,
 };
 
 static const struct hda_fixup ad1986a_fixups[] = {
@@ -328,6 +343,30 @@ static const struct hda_fixup ad1986a_fixups[] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = ad1986a_fixup_eapd,
        },
+       [AD1986A_FIXUP_EAPD_MIX_IN] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = ad1986a_fixup_eapd_mix_in,
+       },
+       [AD1986A_FIXUP_EASYNOTE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x1a, 0x0421402f }, /* headphone */
+                       { 0x1b, 0x90170110 }, /* speaker */
+                       { 0x1c, 0x411111f0 }, /* N/A */
+                       { 0x1d, 0x90a70130 }, /* int mic */
+                       { 0x1e, 0x411111f0 }, /* N/A */
+                       { 0x1f, 0x04a19040 }, /* mic */
+                       { 0x20, 0x411111f0 }, /* N/A */
+                       { 0x21, 0x411111f0 }, /* N/A */
+                       { 0x22, 0x411111f0 }, /* N/A */
+                       { 0x23, 0x411111f0 }, /* N/A */
+                       { 0x24, 0x411111f0 }, /* N/A */
+                       { 0x25, 0x411111f0 }, /* N/A */
+                       {}
+               },
+               .chained = true,
+               .chain_id = AD1986A_FIXUP_EAPD_MIX_IN,
+       },
 };
 
 static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
@@ -341,6 +380,7 @@ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
        SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_FIXUP_LAPTOP),
        SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_FIXUP_SAMSUNG),
        SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_FIXUP_ULTRA),
+       SND_PCI_QUIRK(0x1631, 0xc022, "PackardBell EasyNote MX65", AD1986A_FIXUP_EASYNOTE),
        SND_PCI_QUIRK(0x17aa, 0x2066, "Lenovo N100", AD1986A_FIXUP_INV_JACK_DETECT),
        SND_PCI_QUIRK(0x17aa, 0x1011, "Lenovo M55", AD1986A_FIXUP_3STACK),
        SND_PCI_QUIRK(0x17aa, 0x1017, "Lenovo A60", AD1986A_FIXUP_3STACK),
index e9ebc7bd752cae1afdf95c0b2d2e0e8a15d2393b..fd3ed18670e9c4005d115a26f6410c7efd4faff8 100644 (file)
@@ -855,14 +855,14 @@ static int patch_conexant_auto(struct hda_codec *codec)
        case 0x14f15045:
                codec->single_adc_amp = 1;
                spec->gen.mixer_nid = 0x17;
-               spec->gen.add_stereo_mix_input = 1;
+               spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
                snd_hda_pick_fixup(codec, cxt5045_fixup_models,
                                   cxt5045_fixups, cxt_fixups);
                break;
        case 0x14f15047:
                codec->pin_amp_workaround = 1;
                spec->gen.mixer_nid = 0x19;
-               spec->gen.add_stereo_mix_input = 1;
+               spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
                snd_hda_pick_fixup(codec, cxt5047_fixup_models,
                                   cxt5047_fixups, cxt_fixups);
                break;
index 9dc9cf8c90e97bd5d0faa08bedae29139e43d3bc..5f13d2d180791fb4cd674ee52ffcdb84ffc9c2d3 100644 (file)
@@ -47,7 +47,9 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info");
 
 #define is_haswell(codec)  ((codec)->vendor_id == 0x80862807)
 #define is_broadwell(codec)    ((codec)->vendor_id == 0x80862808)
-#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec))
+#define is_skylake(codec) ((codec)->vendor_id == 0x80862809)
+#define is_haswell_plus(codec) (is_haswell(codec) || is_broadwell(codec) \
+                                       || is_skylake(codec))
 
 #define is_valleyview(codec) ((codec)->vendor_id == 0x80862882)
 #define is_cherryview(codec) ((codec)->vendor_id == 0x80862883)
@@ -3365,6 +3367,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x80862806, .name = "PantherPoint HDMI", .patch = patch_generic_hdmi },
 { .id = 0x80862807, .name = "Haswell HDMI",    .patch = patch_generic_hdmi },
 { .id = 0x80862808, .name = "Broadwell HDMI",  .patch = patch_generic_hdmi },
+{ .id = 0x80862809, .name = "Skylake HDMI",    .patch = patch_generic_hdmi },
 { .id = 0x80862880, .name = "CedarTrail HDMI", .patch = patch_generic_hdmi },
 { .id = 0x80862882, .name = "Valleyview2 HDMI",        .patch = patch_generic_hdmi },
 { .id = 0x80862883, .name = "Braswell HDMI",   .patch = patch_generic_hdmi },
@@ -3425,6 +3428,7 @@ MODULE_ALIAS("snd-hda-codec-id:80862805");
 MODULE_ALIAS("snd-hda-codec-id:80862806");
 MODULE_ALIAS("snd-hda-codec-id:80862807");
 MODULE_ALIAS("snd-hda-codec-id:80862808");
+MODULE_ALIAS("snd-hda-codec-id:80862809");
 MODULE_ALIAS("snd-hda-codec-id:80862880");
 MODULE_ALIAS("snd-hda-codec-id:80862882");
 MODULE_ALIAS("snd-hda-codec-id:80862883");
index a722067c491cb9110e4d9ef5375197058e2f1488..65f1f4e18ea5c5885d4a0e9daedf189c92c7fe3f 100644 (file)
@@ -321,10 +321,12 @@ static void alc_fill_eapd_coef(struct hda_codec *codec)
                break;
        case 0x10ec0233:
        case 0x10ec0255:
+       case 0x10ec0256:
        case 0x10ec0282:
        case 0x10ec0283:
        case 0x10ec0286:
        case 0x10ec0288:
+       case 0x10ec0298:
                alc_update_coef_idx(codec, 0x10, 1<<9, 0);
                break;
        case 0x10ec0285:
@@ -2659,7 +2661,9 @@ enum {
        ALC269_TYPE_ALC284,
        ALC269_TYPE_ALC285,
        ALC269_TYPE_ALC286,
+       ALC269_TYPE_ALC298,
        ALC269_TYPE_ALC255,
+       ALC269_TYPE_ALC256,
 };
 
 /*
@@ -2686,7 +2690,9 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
        case ALC269_TYPE_ALC282:
        case ALC269_TYPE_ALC283:
        case ALC269_TYPE_ALC286:
+       case ALC269_TYPE_ALC298:
        case ALC269_TYPE_ALC255:
+       case ALC269_TYPE_ALC256:
                ssids = alc269_ssids;
                break;
        default:
@@ -4829,6 +4835,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
        SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5417,9 +5424,15 @@ static int patch_alc269(struct hda_codec *codec)
                spec->codec_variant = ALC269_TYPE_ALC286;
                spec->shutup = alc286_shutup;
                break;
+       case 0x10ec0298:
+               spec->codec_variant = ALC269_TYPE_ALC298;
+               break;
        case 0x10ec0255:
                spec->codec_variant = ALC269_TYPE_ALC255;
                break;
+       case 0x10ec0256:
+               spec->codec_variant = ALC269_TYPE_ALC256;
+               break;
        }
 
        if (snd_hda_codec_read(codec, 0x51, 0, AC_VERB_PARAMETERS, 0) == 0x10ec5505) {
@@ -6341,6 +6354,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
        { .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 },
        { .id = 0x10ec0235, .name = "ALC233", .patch = patch_alc269 },
        { .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 },
+       { .id = 0x10ec0256, .name = "ALC256", .patch = patch_alc269 },
        { .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 },
        { .id = 0x10ec0262, .name = "ALC262", .patch = patch_alc262 },
        { .id = 0x10ec0267, .name = "ALC267", .patch = patch_alc268 },
@@ -6360,6 +6374,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
        { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 },
        { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 },
        { .id = 0x10ec0293, .name = "ALC293", .patch = patch_alc269 },
+       { .id = 0x10ec0298, .name = "ALC298", .patch = patch_alc269 },
        { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
          .patch = patch_alc861 },
        { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
index 6c206b6c8d65d389119e5f7c630f45e52767bd61..3de6d3d779c994d18fb54a09e7bbf33be98eee26 100644 (file)
@@ -137,7 +137,7 @@ static struct via_spec *via_new_spec(struct hda_codec *codec)
        spec->gen.indep_hp = 1;
        spec->gen.keep_eapd_on = 1;
        spec->gen.pcm_playback_hook = via_playback_pcm_hook;
-       spec->gen.add_stereo_mix_input = 1;
+       spec->gen.add_stereo_mix_input = HDA_HINT_STEREO_MIX_AUTO;
        return spec;
 }
 
index b1cc2a4a7fc0ec3c08a597e58ef5bc9e94d68d61..99ff35e2a25d012ba052b00fef7c2d1963a8e5c3 100644 (file)
@@ -267,7 +267,7 @@ static void atmel_ssc_shutdown(struct snd_pcm_substream *substream,
        if (!ssc_p->dir_mask) {
                if (ssc_p->initialized) {
                        /* Shutdown the SSC clock. */
-                       pr_debug("atmel_ssc_dau: Stopping clock\n");
+                       pr_debug("atmel_ssc_dai: Stopping clock\n");
                        clk_disable(ssc_p->ssc->clk);
 
                        free_irq(ssc_p->ssc->irq, ssc_p);
index 883c5778b309322797a9b49b38b2ff117eec079b..8349f982a586841a3ac9e7b0526af05699c7deaf 100644 (file)
@@ -520,6 +520,8 @@ config SND_SOC_RT5670
 
 config SND_SOC_RT5677
        tristate
+       select REGMAP_I2C
+       select REGMAP_IRQ
 
 config SND_SOC_RT5677_SPI
        tristate
index 4d62230bd378f35626ce39f15a8d225c63fd4c8d..d0547fa275fc307d8ff40b7d6dcf2585061dc317 100644 (file)
@@ -24,8 +24,13 @@ static int pcm512x_i2c_probe(struct i2c_client *i2c,
                             const struct i2c_device_id *id)
 {
        struct regmap *regmap;
+       struct regmap_config config = pcm512x_regmap;
 
-       regmap = devm_regmap_init_i2c(i2c, &pcm512x_regmap);
+       /* msb needs to be set to enable auto-increment of addresses */
+       config.read_flag_mask = 0x80;
+       config.write_flag_mask = 0x80;
+
+       regmap = devm_regmap_init_i2c(i2c, &config);
        if (IS_ERR(regmap))
                return PTR_ERR(regmap);
 
index a7789a8726e34615b66eeefa639b7bd4718ac254..27141e2df878a65204fc31cc9486fb099319bd26 100644 (file)
@@ -2209,6 +2209,10 @@ static int rt5645_jack_detect(struct snd_soc_codec *codec)
        int gpio_state, jack_type = 0;
        unsigned int val;
 
+       if (!gpio_is_valid(rt5645->pdata.hp_det_gpio)) {
+               dev_err(codec->dev, "invalid gpio\n");
+               return -EINVAL;
+       }
        gpio_state = gpio_get_value(rt5645->pdata.hp_det_gpio);
 
        dev_dbg(codec->dev, "gpio = %d(%d)\n", rt5645->pdata.hp_det_gpio,
index b8a782c0d4cd26ba3bb63394670bbcfd648d1880..619525200705e3651be1780a229d3b7dc2d35537 100644 (file)
@@ -998,7 +998,7 @@ static int hsw_pcm_dev_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
 
 static int hsw_pcm_runtime_idle(struct device *dev)
 {
@@ -1057,7 +1057,7 @@ static int hsw_pcm_runtime_resume(struct device *dev)
 #define hsw_pcm_runtime_resume         NULL
 #endif
 
-#if defined(CONFIG_PM_SLEEP) && defined(CONFIG_PM_RUNTIME)
+#ifdef CONFIG_PM
 
 static void hsw_pcm_complete(struct device *dev)
 {
index 31124aa4434ef8da3d21ecb7d382703d5566f704..3abc29e8a9287d133636d97fe74a96687c953bc0 100644 (file)
@@ -43,7 +43,7 @@
 #include "sst.h"
 
 struct sst_machines {
-       char codec_id[32];
+       char *codec_id;
        char board[32];
        char machine[32];
        void (*machine_quirk)(void);
@@ -277,16 +277,16 @@ int sst_acpi_probe(struct platform_device *pdev)
        dev_dbg(dev, "ACPI device id: %x\n", dev_id);
 
        plat_dev = platform_device_register_data(dev, mach->pdata->platform, -1, NULL, 0);
-       if (plat_dev == NULL) {
+       if (IS_ERR(plat_dev)) {
                dev_err(dev, "Failed to create machine device: %s\n", mach->pdata->platform);
-               return -ENODEV;
+               return PTR_ERR(plat_dev);
        }
 
        /* Create platform device for sst machine driver */
        mdev = platform_device_register_data(dev, mach->machine, -1, NULL, 0);
-       if (mdev == NULL) {
+       if (IS_ERR(mdev)) {
                dev_err(dev, "Failed to create machine device: %s\n", mach->machine);
-               return -ENODEV;
+               return PTR_ERR(mdev);
        }
 
        ret = sst_alloc_drv_context(&ctx, dev, dev_id);
index b1a7c5bce4a169d499cdbe46984f7ad95dc9644b..b5a80c528d869e58cbbb58c44ba3cf6a531616ae 100644 (file)
@@ -1261,6 +1261,8 @@ static int samsung_i2s_probe(struct platform_device *pdev)
                        ret = -ENOMEM;
                        goto err;
                }
+
+               sec_dai->variant_regs = pri_dai->variant_regs;
                sec_dai->dma_playback.dma_addr = regs_base + I2STXDS;
                sec_dai->dma_playback.ch_name = "tx-sec";
 
index 1994d41348f88d5dec44f7d4c171be90e62eff3f..b703cb3cda1993402d60efc03e9e7d840cb68f72 100644 (file)
@@ -333,8 +333,11 @@ static struct usbmix_name_map gamecom780_map[] = {
        {}
 };
 
-static const struct usbmix_name_map kef_x300a_map[] = {
-       { 10, NULL }, /* firmware locks up (?) when we try to access this FU */
+/* some (all?) SCMS USB3318 devices are affected by a firmware lock up
+ * when anything attempts to access FU 10 (control)
+ */
+static const struct usbmix_name_map scms_usb3318_map[] = {
+       { 10, NULL },
        { 0 }
 };
 
@@ -434,8 +437,14 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .map = ebox44_map,
        },
        {
+               /* KEF X300A */
                .id = USB_ID(0x27ac, 0x1000),
-               .map = kef_x300a_map,
+               .map = scms_usb3318_map,
+       },
+       {
+               /* Arcam rPAC */
+               .id = USB_ID(0x25c4, 0x0003),
+               .map = scms_usb3318_map,
        },
        { 0 } /* terminator */
 };
index 9109652b88b94345103e774004d8e6ff344ff5c1..7438e7c4a842da4aac34334f13575aa765e3c79d 100644 (file)
@@ -655,7 +655,7 @@ static struct scarlett_device_info s6i6_info = {
                .names = NULL
        },
 
-       .num_controls = 0,
+       .num_controls = 9,
        .controls = {
                { .num = 0, .type = SCARLETT_OUTPUTS, .name = "Monitor" },
                { .num = 1, .type = SCARLETT_OUTPUTS, .name = "Headphone" },
index 4dbfb3d18ee2356c95c30974c8a778570ef4da47..a7398412310bd53e00b84c2aa6c567ac451a4e7a 100644 (file)
@@ -1245,8 +1245,9 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
 
        /* XMOS based USB DACs */
        switch (chip->usb_id) {
-       /* iFi Audio micro/nano iDSD */
-       case USB_ID(0x20b1, 0x3008):
+       case USB_ID(0x20b1, 0x3008): /* iFi Audio micro/nano iDSD */
+       case USB_ID(0x20b1, 0x2008): /* Matrix Audio X-Sabre */
+       case USB_ID(0x20b1, 0x300a): /* Matrix Audio Mini-i Pro */
                if (fp->altsetting == 2)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
diff --git a/tools/include/asm-generic/bitops.h b/tools/include/asm-generic/bitops.h
new file mode 100644 (file)
index 0000000..6eedba1
--- /dev/null
@@ -0,0 +1,27 @@
+#ifndef __TOOLS_ASM_GENERIC_BITOPS_H
+#define __TOOLS_ASM_GENERIC_BITOPS_H
+
+/*
+ * tools/ copied this from include/asm-generic/bitops.h, bit by bit as it needed
+ * some functions.
+ *
+ * For the benefit of those who are trying to port Linux to another
+ * architecture, here are some C-language equivalents.  You should
+ * recode these in the native assembly language, if at all possible.
+ *
+ * C language equivalents written by Theodore Ts'o, 9/26/92
+ */
+
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+
+#ifndef _TOOLS_LINUX_BITOPS_H_
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm-generic/bitops/atomic.h>
+
+#endif /* __TOOLS_ASM_GENERIC_BITOPS_H */
diff --git a/tools/include/asm-generic/bitops/__ffs.h b/tools/include/asm-generic/bitops/__ffs.h
new file mode 100644 (file)
index 0000000..c941750
--- /dev/null
@@ -0,0 +1,43 @@
+#ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS___FFS_H_
+#define _TOOLS_LINUX_ASM_GENERIC_BITOPS___FFS_H_
+
+#include <asm/types.h>
+
+/**
+ * __ffs - find first bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+       int num = 0;
+
+#if __BITS_PER_LONG == 64
+       if ((word & 0xffffffff) == 0) {
+               num += 32;
+               word >>= 32;
+       }
+#endif
+       if ((word & 0xffff) == 0) {
+               num += 16;
+               word >>= 16;
+       }
+       if ((word & 0xff) == 0) {
+               num += 8;
+               word >>= 8;
+       }
+       if ((word & 0xf) == 0) {
+               num += 4;
+               word >>= 4;
+       }
+       if ((word & 0x3) == 0) {
+               num += 2;
+               word >>= 2;
+       }
+       if ((word & 0x1) == 0)
+               num += 1;
+       return num;
+}
+
+#endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS___FFS_H_ */
diff --git a/tools/include/asm-generic/bitops/__fls.h b/tools/include/asm-generic/bitops/__fls.h
new file mode 100644 (file)
index 0000000..2218b9a
--- /dev/null
@@ -0,0 +1 @@
+#include <../../../../include/asm-generic/bitops/__fls.h>
diff --git a/tools/include/asm-generic/bitops/atomic.h b/tools/include/asm-generic/bitops/atomic.h
new file mode 100644 (file)
index 0000000..4bccd7c
--- /dev/null
@@ -0,0 +1,22 @@
+#ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_
+#define _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_
+
+#include <asm/types.h>
+
+static inline void set_bit(int nr, unsigned long *addr)
+{
+       addr[nr / __BITS_PER_LONG] |= 1UL << (nr % __BITS_PER_LONG);
+}
+
+static inline void clear_bit(int nr, unsigned long *addr)
+{
+       addr[nr / __BITS_PER_LONG] &= ~(1UL << (nr % __BITS_PER_LONG));
+}
+
+static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
+{
+       return ((1UL << (nr % __BITS_PER_LONG)) &
+               (((unsigned long *)addr)[nr / __BITS_PER_LONG])) != 0;
+}
+
+#endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_ATOMIC_H_ */
diff --git a/tools/include/asm-generic/bitops/find.h b/tools/include/asm-generic/bitops/find.h
new file mode 100644 (file)
index 0000000..31f5154
--- /dev/null
@@ -0,0 +1,33 @@
+#ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_
+#define _TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_
+
+#ifndef find_next_bit
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ *
+ * Returns the bit number for the next set bit
+ * If no bits are set, returns @size.
+ */
+extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
+               size, unsigned long offset);
+#endif
+
+#ifndef find_first_bit
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum number of bits to search
+ *
+ * Returns the bit number of the first set bit.
+ * If no bits are set, returns @size.
+ */
+extern unsigned long find_first_bit(const unsigned long *addr,
+                                   unsigned long size);
+
+#endif /* find_first_bit */
+
+#endif /*_TOOLS_LINUX_ASM_GENERIC_BITOPS_FIND_H_ */
diff --git a/tools/include/asm-generic/bitops/fls.h b/tools/include/asm-generic/bitops/fls.h
new file mode 100644 (file)
index 0000000..dbf711a
--- /dev/null
@@ -0,0 +1 @@
+#include <../../../../include/asm-generic/bitops/fls.h>
diff --git a/tools/include/asm-generic/bitops/fls64.h b/tools/include/asm-generic/bitops/fls64.h
new file mode 100644 (file)
index 0000000..980b1f6
--- /dev/null
@@ -0,0 +1 @@
+#include <../../../../include/asm-generic/bitops/fls64.h>
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
new file mode 100644 (file)
index 0000000..26005a1
--- /dev/null
@@ -0,0 +1,53 @@
+#ifndef _TOOLS_LINUX_BITOPS_H_
+#define _TOOLS_LINUX_BITOPS_H_
+
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <asm/hweight.h>
+
+#ifndef __WORDSIZE
+#define __WORDSIZE (__SIZEOF_LONG__ * 8)
+#endif
+
+#define BITS_PER_LONG __WORDSIZE
+
+#define BIT_MASK(nr)           (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr)           ((nr) / BITS_PER_LONG)
+#define BITS_PER_BYTE          8
+#define BITS_TO_LONGS(nr)      DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
+#define BITS_TO_U64(nr)                DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
+#define BITS_TO_U32(nr)                DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
+#define BITS_TO_BYTES(nr)      DIV_ROUND_UP(nr, BITS_PER_BYTE)
+
+/*
+ * Include this here because some architectures need generic_ffs/fls in
+ * scope
+ *
+ * XXX: this needs to be asm/bitops.h, when we get to per arch optimizations
+ */
+#include <asm-generic/bitops.h>
+
+#define for_each_set_bit(bit, addr, size) \
+       for ((bit) = find_first_bit((addr), (size));            \
+            (bit) < (size);                                    \
+            (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+/* same as for_each_set_bit() but use bit as value to start with */
+#define for_each_set_bit_from(bit, addr, size) \
+       for ((bit) = find_next_bit((addr), (size), (bit));      \
+            (bit) < (size);                                    \
+            (bit) = find_next_bit((addr), (size), (bit) + 1))
+
+static inline unsigned long hweight_long(unsigned long w)
+{
+       return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
+}
+
+static inline unsigned fls_long(unsigned long l)
+{
+       if (sizeof(l) == 4)
+               return fls(l);
+       return fls64(l);
+}
+
+#endif
diff --git a/tools/include/linux/log2.h b/tools/include/linux/log2.h
new file mode 100644 (file)
index 0000000..4144666
--- /dev/null
@@ -0,0 +1,185 @@
+/* Integer base 2 logarithm calculation
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _TOOLS_LINUX_LOG2_H
+#define _TOOLS_LINUX_LOG2_H
+
+/*
+ * deal with unrepresentable constant logarithms
+ */
+extern __attribute__((const, noreturn))
+int ____ilog2_NaN(void);
+
+/*
+ * non-constant log of base 2 calculators
+ * - the arch may override these in asm/bitops.h if they can be implemented
+ *   more efficiently than using fls() and fls64()
+ * - the arch is not required to handle n==0 if implementing the fallback
+ */
+static inline __attribute__((const))
+int __ilog2_u32(u32 n)
+{
+       return fls(n) - 1;
+}
+
+static inline __attribute__((const))
+int __ilog2_u64(u64 n)
+{
+       return fls64(n) - 1;
+}
+
+/*
+ *  Determine whether some value is a power of two, where zero is
+ * *not* considered a power of two.
+ */
+
+static inline __attribute__((const))
+bool is_power_of_2(unsigned long n)
+{
+       return (n != 0 && ((n & (n - 1)) == 0));
+}
+
+/*
+ * round up to nearest power of two
+ */
+static inline __attribute__((const))
+unsigned long __roundup_pow_of_two(unsigned long n)
+{
+       return 1UL << fls_long(n - 1);
+}
+
+/*
+ * round down to nearest power of two
+ */
+static inline __attribute__((const))
+unsigned long __rounddown_pow_of_two(unsigned long n)
+{
+       return 1UL << (fls_long(n) - 1);
+}
+
+/**
+ * ilog2 - log of base 2 of 32-bit or a 64-bit unsigned value
+ * @n - parameter
+ *
+ * constant-capable log of base 2 calculation
+ * - this can be used to initialise global variables from constant data, hence
+ *   the massive ternary operator construction
+ *
+ * selects the appropriately-sized optimised version depending on sizeof(n)
+ */
+#define ilog2(n)                               \
+(                                              \
+       __builtin_constant_p(n) ? (             \
+               (n) < 1 ? ____ilog2_NaN() :     \
+               (n) & (1ULL << 63) ? 63 :       \
+               (n) & (1ULL << 62) ? 62 :       \
+               (n) & (1ULL << 61) ? 61 :       \
+               (n) & (1ULL << 60) ? 60 :       \
+               (n) & (1ULL << 59) ? 59 :       \
+               (n) & (1ULL << 58) ? 58 :       \
+               (n) & (1ULL << 57) ? 57 :       \
+               (n) & (1ULL << 56) ? 56 :       \
+               (n) & (1ULL << 55) ? 55 :       \
+               (n) & (1ULL << 54) ? 54 :       \
+               (n) & (1ULL << 53) ? 53 :       \
+               (n) & (1ULL << 52) ? 52 :       \
+               (n) & (1ULL << 51) ? 51 :       \
+               (n) & (1ULL << 50) ? 50 :       \
+               (n) & (1ULL << 49) ? 49 :       \
+               (n) & (1ULL << 48) ? 48 :       \
+               (n) & (1ULL << 47) ? 47 :       \
+               (n) & (1ULL << 46) ? 46 :       \
+               (n) & (1ULL << 45) ? 45 :       \
+               (n) & (1ULL << 44) ? 44 :       \
+               (n) & (1ULL << 43) ? 43 :       \
+               (n) & (1ULL << 42) ? 42 :       \
+               (n) & (1ULL << 41) ? 41 :       \
+               (n) & (1ULL << 40) ? 40 :       \
+               (n) & (1ULL << 39) ? 39 :       \
+               (n) & (1ULL << 38) ? 38 :       \
+               (n) & (1ULL << 37) ? 37 :       \
+               (n) & (1ULL << 36) ? 36 :       \
+               (n) & (1ULL << 35) ? 35 :       \
+               (n) & (1ULL << 34) ? 34 :       \
+               (n) & (1ULL << 33) ? 33 :       \
+               (n) & (1ULL << 32) ? 32 :       \
+               (n) & (1ULL << 31) ? 31 :       \
+               (n) & (1ULL << 30) ? 30 :       \
+               (n) & (1ULL << 29) ? 29 :       \
+               (n) & (1ULL << 28) ? 28 :       \
+               (n) & (1ULL << 27) ? 27 :       \
+               (n) & (1ULL << 26) ? 26 :       \
+               (n) & (1ULL << 25) ? 25 :       \
+               (n) & (1ULL << 24) ? 24 :       \
+               (n) & (1ULL << 23) ? 23 :       \
+               (n) & (1ULL << 22) ? 22 :       \
+               (n) & (1ULL << 21) ? 21 :       \
+               (n) & (1ULL << 20) ? 20 :       \
+               (n) & (1ULL << 19) ? 19 :       \
+               (n) & (1ULL << 18) ? 18 :       \
+               (n) & (1ULL << 17) ? 17 :       \
+               (n) & (1ULL << 16) ? 16 :       \
+               (n) & (1ULL << 15) ? 15 :       \
+               (n) & (1ULL << 14) ? 14 :       \
+               (n) & (1ULL << 13) ? 13 :       \
+               (n) & (1ULL << 12) ? 12 :       \
+               (n) & (1ULL << 11) ? 11 :       \
+               (n) & (1ULL << 10) ? 10 :       \
+               (n) & (1ULL <<  9) ?  9 :       \
+               (n) & (1ULL <<  8) ?  8 :       \
+               (n) & (1ULL <<  7) ?  7 :       \
+               (n) & (1ULL <<  6) ?  6 :       \
+               (n) & (1ULL <<  5) ?  5 :       \
+               (n) & (1ULL <<  4) ?  4 :       \
+               (n) & (1ULL <<  3) ?  3 :       \
+               (n) & (1ULL <<  2) ?  2 :       \
+               (n) & (1ULL <<  1) ?  1 :       \
+               (n) & (1ULL <<  0) ?  0 :       \
+               ____ilog2_NaN()                 \
+                                  ) :          \
+       (sizeof(n) <= 4) ?                      \
+       __ilog2_u32(n) :                        \
+       __ilog2_u64(n)                          \
+ )
+
+/**
+ * roundup_pow_of_two - round the given value up to nearest power of two
+ * @n - parameter
+ *
+ * round the given value up to the nearest power of two
+ * - the result is undefined when n == 0
+ * - this can be used to initialise global variables from constant data
+ */
+#define roundup_pow_of_two(n)                  \
+(                                              \
+       __builtin_constant_p(n) ? (             \
+               (n == 1) ? 1 :                  \
+               (1UL << (ilog2((n) - 1) + 1))   \
+                                  ) :          \
+       __roundup_pow_of_two(n)                 \
+ )
+
+/**
+ * rounddown_pow_of_two - round the given value down to nearest power of two
+ * @n - parameter
+ *
+ * round the given value down to the nearest power of two
+ * - the result is undefined when n == 0
+ * - this can be used to initialise global variables from constant data
+ */
+#define rounddown_pow_of_two(n)                        \
+(                                              \
+       __builtin_constant_p(n) ? (             \
+               (1UL << ilog2(n))) :            \
+       __rounddown_pow_of_two(n)               \
+ )
+
+#endif /* _TOOLS_LINUX_LOG2_H */
index c1b49c36a951d74b1c12b417b20428b0f4326e70..65d9be3f988747ae300db30d3b62cf0e8213dd69 100644 (file)
@@ -7,6 +7,10 @@
 #include <stdlib.h>
 #include <string.h>
 #include <sys/vfs.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
 
 #include "debugfs.h"
 #include "fs.h"
@@ -163,3 +167,33 @@ const char *name##__mountpoint(void)       \
 
 FS__MOUNTPOINT(sysfs,  FS__SYSFS);
 FS__MOUNTPOINT(procfs, FS__PROCFS);
+
+int filename__read_int(const char *filename, int *value)
+{
+       char line[64];
+       int fd = open(filename, O_RDONLY), err = -1;
+
+       if (fd < 0)
+               return -1;
+
+       if (read(fd, line, sizeof(line)) > 0) {
+               *value = atoi(line);
+               err = 0;
+       }
+
+       close(fd);
+       return err;
+}
+
+int sysctl__read_int(const char *sysctl, int *value)
+{
+       char path[PATH_MAX];
+       const char *procfs = procfs__mountpoint();
+
+       if (!procfs)
+               return -1;
+
+       snprintf(path, sizeof(path), "%s/sys/%s", procfs, sysctl);
+
+       return filename__read_int(path, value);
+}
index cb7049551f335d6a7dbcb108933ba8e9d16e5e59..6caa2bbc6cecdc2f7fa39b69fa1cf30f8653f15e 100644 (file)
@@ -11,4 +11,7 @@
 
 const char *sysfs__mountpoint(void);
 const char *procfs__mountpoint(void);
+
+int filename__read_int(const char *filename, int *value);
+int sysctl__read_int(const char *sysctl, int *value);
 #endif /* __API_FS__ */
diff --git a/tools/lib/util/find_next_bit.c b/tools/lib/util/find_next_bit.c
new file mode 100644 (file)
index 0000000..41b44f6
--- /dev/null
@@ -0,0 +1,89 @@
+/* find_next_bit.c: fallback find next bit implementation
+ *
+ * Copied from lib/find_next_bit.c to tools/lib/next_bit.c
+ *
+ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/bitops.h>
+#include <asm/types.h>
+#include <asm/byteorder.h>
+
+#define BITOP_WORD(nr)         ((nr) / BITS_PER_LONG)
+
+#ifndef find_next_bit
+/*
+ * Find the next set bit in a memory region.
+ */
+unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
+                           unsigned long offset)
+{
+       const unsigned long *p = addr + BITOP_WORD(offset);
+       unsigned long result = offset & ~(BITS_PER_LONG-1);
+       unsigned long tmp;
+
+       if (offset >= size)
+               return size;
+       size -= result;
+       offset %= BITS_PER_LONG;
+       if (offset) {
+               tmp = *(p++);
+               tmp &= (~0UL << offset);
+               if (size < BITS_PER_LONG)
+                       goto found_first;
+               if (tmp)
+                       goto found_middle;
+               size -= BITS_PER_LONG;
+               result += BITS_PER_LONG;
+       }
+       while (size & ~(BITS_PER_LONG-1)) {
+               if ((tmp = *(p++)))
+                       goto found_middle;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+       tmp = *p;
+
+found_first:
+       tmp &= (~0UL >> (BITS_PER_LONG - size));
+       if (tmp == 0UL)         /* Are any bits set? */
+               return result + size;   /* Nope. */
+found_middle:
+       return result + __ffs(tmp);
+}
+#endif
+
+#ifndef find_first_bit
+/*
+ * Find the first set bit in a memory region.
+ */
+unsigned long find_first_bit(const unsigned long *addr, unsigned long size)
+{
+       const unsigned long *p = addr;
+       unsigned long result = 0;
+       unsigned long tmp;
+
+       while (size & ~(BITS_PER_LONG-1)) {
+               if ((tmp = *(p++)))
+                       goto found;
+               result += BITS_PER_LONG;
+               size -= BITS_PER_LONG;
+       }
+       if (!size)
+               return result;
+
+       tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
+       if (tmp == 0UL)         /* Are any bits set? */
+               return result + size;   /* Nope. */
+found:
+       return result + __ffs(tmp);
+}
+#endif
index d240bb2e5b22348d23d1d7a2f569e074c7b0995e..1e8e400b449375ab8c6b9bba5161d37ea7e1f5fe 100644 (file)
@@ -18,6 +18,10 @@ OPTIONS
          --debug verbose   # sets verbose = 1
          --debug verbose=2 # sets verbose = 2
 
+--buildid-dir::
+       Setup buildid cache directory. It has higher priority than
+       buildid.dir config file option.
+
 DESCRIPTION
 -----------
 Performance counters for Linux are a new kernel-based subsystem
index 344c4d3d0a4a7bb567b9d40c06175b0984b1a52a..83e2887f91a39200612290bd0b2b2fbd824e660a 100644 (file)
@@ -4,17 +4,31 @@ tools/lib/traceevent
 tools/lib/api
 tools/lib/symbol/kallsyms.c
 tools/lib/symbol/kallsyms.h
+tools/lib/util/find_next_bit.c
 tools/include/asm/bug.h
+tools/include/asm-generic/bitops/atomic.h
+tools/include/asm-generic/bitops/__ffs.h
+tools/include/asm-generic/bitops/__fls.h
+tools/include/asm-generic/bitops/find.h
+tools/include/asm-generic/bitops/fls64.h
+tools/include/asm-generic/bitops/fls.h
+tools/include/asm-generic/bitops.h
+tools/include/linux/bitops.h
 tools/include/linux/compiler.h
-tools/include/linux/hash.h
 tools/include/linux/export.h
+tools/include/linux/hash.h
+tools/include/linux/log2.h
 tools/include/linux/types.h
+include/asm-generic/bitops/fls64.h
+include/asm-generic/bitops/__fls.h
+include/asm-generic/bitops/fls.h
 include/linux/const.h
 include/linux/perf_event.h
 include/linux/rbtree.h
 include/linux/list.h
 include/linux/hash.h
 include/linux/stringify.h
+lib/find_next_bit.c
 lib/rbtree.c
 include/linux/swab.h
 arch/*/include/asm/unistd*.h
index 478efa9b23640abbad22db3974c7474017ab2a9f..67a03a825b3c94bb894f0f557dac3bb520943e65 100644 (file)
@@ -231,8 +231,16 @@ LIB_H += ../../include/uapi/linux/const.h
 LIB_H += ../include/linux/hash.h
 LIB_H += ../../include/linux/stringify.h
 LIB_H += util/include/linux/bitmap.h
-LIB_H += util/include/linux/bitops.h
+LIB_H += ../include/linux/bitops.h
+LIB_H += ../include/asm-generic/bitops/atomic.h
+LIB_H += ../include/asm-generic/bitops/find.h
+LIB_H += ../include/asm-generic/bitops/fls64.h
+LIB_H += ../include/asm-generic/bitops/fls.h
+LIB_H += ../include/asm-generic/bitops/__ffs.h
+LIB_H += ../include/asm-generic/bitops/__fls.h
+LIB_H += ../include/asm-generic/bitops.h
 LIB_H += ../include/linux/compiler.h
+LIB_H += ../include/linux/log2.h
 LIB_H += util/include/linux/const.h
 LIB_H += util/include/linux/ctype.h
 LIB_H += util/include/linux/kernel.h
@@ -335,6 +343,7 @@ LIB_OBJS += $(OUTPUT)util/event.o
 LIB_OBJS += $(OUTPUT)util/evlist.o
 LIB_OBJS += $(OUTPUT)util/evsel.o
 LIB_OBJS += $(OUTPUT)util/exec_cmd.o
+LIB_OBJS += $(OUTPUT)util/find_next_bit.o
 LIB_OBJS += $(OUTPUT)util/help.o
 LIB_OBJS += $(OUTPUT)util/kallsyms.o
 LIB_OBJS += $(OUTPUT)util/levenshtein.o
@@ -458,7 +467,6 @@ BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o
 BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o
 endif
 BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
-BUILTIN_OBJS += $(OUTPUT)bench/mem-memset.o
 BUILTIN_OBJS += $(OUTPUT)bench/futex-hash.o
 BUILTIN_OBJS += $(OUTPUT)bench/futex-wake.o
 BUILTIN_OBJS += $(OUTPUT)bench/futex-requeue.o
@@ -735,6 +743,9 @@ $(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c $(OUTPUT)PERF-CFLAGS
 $(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
 
+$(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c $(OUTPUT)PERF-CFLAGS
+       $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+
 $(OUTPUT)util/parse-events.o: util/parse-events.c $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-redundant-decls $<
 
index 2465141b554bf0e135edc427799227032e25345d..6c14afe8c1b18ea1ab4a50aa4b6dc46925d304a1 100644 (file)
@@ -13,6 +13,7 @@
 #include "../util/cloexec.h"
 #include "bench.h"
 #include "mem-memcpy-arch.h"
+#include "mem-memset-arch.h"
 
 #include <stdio.h>
 #include <stdlib.h>
@@ -48,20 +49,24 @@ static const struct option options[] = {
 };
 
 typedef void *(*memcpy_t)(void *, const void *, size_t);
+typedef void *(*memset_t)(void *, int, size_t);
 
 struct routine {
        const char *name;
        const char *desc;
-       memcpy_t fn;
+       union {
+               memcpy_t memcpy;
+               memset_t memset;
+       } fn;
 };
 
-struct routine routines[] = {
-       { "default",
-         "Default memcpy() provided by glibc",
-         memcpy },
+struct routine memcpy_routines[] = {
+       { .name = "default",
+         .desc = "Default memcpy() provided by glibc",
+         .fn.memcpy = memcpy },
 #ifdef HAVE_ARCH_X86_64_SUPPORT
 
-#define MEMCPY_FN(fn, name, desc) { name, desc, fn },
+#define MEMCPY_FN(_fn, _name, _desc) {.name = _name, .desc = _desc, .fn.memcpy = _fn},
 #include "mem-memcpy-x86-64-asm-def.h"
 #undef MEMCPY_FN
 
@@ -69,7 +74,7 @@ struct routine routines[] = {
 
        { NULL,
          NULL,
-         NULL   }
+         {NULL}   }
 };
 
 static const char * const bench_mem_memcpy_usage[] = {
@@ -110,63 +115,6 @@ static double timeval2double(struct timeval *ts)
                (double)ts->tv_usec / (double)1000000;
 }
 
-static void alloc_mem(void **dst, void **src, size_t length)
-{
-       *dst = zalloc(length);
-       if (!*dst)
-               die("memory allocation failed - maybe length is too large?\n");
-
-       *src = zalloc(length);
-       if (!*src)
-               die("memory allocation failed - maybe length is too large?\n");
-       /* Make sure to always replace the zero pages even if MMAP_THRESH is crossed */
-       memset(*src, 0, length);
-}
-
-static u64 do_memcpy_cycle(memcpy_t fn, size_t len, bool prefault)
-{
-       u64 cycle_start = 0ULL, cycle_end = 0ULL;
-       void *src = NULL, *dst = NULL;
-       int i;
-
-       alloc_mem(&src, &dst, len);
-
-       if (prefault)
-               fn(dst, src, len);
-
-       cycle_start = get_cycle();
-       for (i = 0; i < iterations; ++i)
-               fn(dst, src, len);
-       cycle_end = get_cycle();
-
-       free(src);
-       free(dst);
-       return cycle_end - cycle_start;
-}
-
-static double do_memcpy_gettimeofday(memcpy_t fn, size_t len, bool prefault)
-{
-       struct timeval tv_start, tv_end, tv_diff;
-       void *src = NULL, *dst = NULL;
-       int i;
-
-       alloc_mem(&src, &dst, len);
-
-       if (prefault)
-               fn(dst, src, len);
-
-       BUG_ON(gettimeofday(&tv_start, NULL));
-       for (i = 0; i < iterations; ++i)
-               fn(dst, src, len);
-       BUG_ON(gettimeofday(&tv_end, NULL));
-
-       timersub(&tv_end, &tv_start, &tv_diff);
-
-       free(src);
-       free(dst);
-       return (double)((double)len / timeval2double(&tv_diff));
-}
-
 #define pf (no_prefault ? 0 : 1)
 
 #define print_bps(x) do {                                      \
@@ -180,16 +128,25 @@ static double do_memcpy_gettimeofday(memcpy_t fn, size_t len, bool prefault)
                        printf(" %14lf GB/Sec", x / K / K / K); \
        } while (0)
 
-int bench_mem_memcpy(int argc, const char **argv,
-                    const char *prefix __maybe_unused)
+struct bench_mem_info {
+       const struct routine *routines;
+       u64 (*do_cycle)(const struct routine *r, size_t len, bool prefault);
+       double (*do_gettimeofday)(const struct routine *r, size_t len, bool prefault);
+       const char *const *usage;
+};
+
+static int bench_mem_common(int argc, const char **argv,
+                    const char *prefix __maybe_unused,
+                    struct bench_mem_info *info)
 {
        int i;
        size_t len;
+       double totallen;
        double result_bps[2];
        u64 result_cycle[2];
 
        argc = parse_options(argc, argv, options,
-                            bench_mem_memcpy_usage, 0);
+                            info->usage, 0);
 
        if (no_prefault && only_prefault) {
                fprintf(stderr, "Invalid options: -o and -n are mutually exclusive\n");
@@ -200,6 +157,7 @@ int bench_mem_memcpy(int argc, const char **argv,
                init_cycle();
 
        len = (size_t)perf_atoll((char *)length_str);
+       totallen = (double)len * iterations;
 
        result_cycle[0] = result_cycle[1] = 0ULL;
        result_bps[0] = result_bps[1] = 0.0;
@@ -213,16 +171,16 @@ int bench_mem_memcpy(int argc, const char **argv,
        if (only_prefault && no_prefault)
                only_prefault = no_prefault = false;
 
-       for (i = 0; routines[i].name; i++) {
-               if (!strcmp(routines[i].name, routine))
+       for (i = 0; info->routines[i].name; i++) {
+               if (!strcmp(info->routines[i].name, routine))
                        break;
        }
-       if (!routines[i].name) {
+       if (!info->routines[i].name) {
                printf("Unknown routine:%s\n", routine);
                printf("Available routines...\n");
-               for (i = 0; routines[i].name; i++) {
+               for (i = 0; info->routines[i].name; i++) {
                        printf("\t%s ... %s\n",
-                              routines[i].name, routines[i].desc);
+                              info->routines[i].name, info->routines[i].desc);
                }
                return 1;
        }
@@ -234,25 +192,25 @@ int bench_mem_memcpy(int argc, const char **argv,
                /* show both of results */
                if (use_cycle) {
                        result_cycle[0] =
-                               do_memcpy_cycle(routines[i].fn, len, false);
+                               info->do_cycle(&info->routines[i], len, false);
                        result_cycle[1] =
-                               do_memcpy_cycle(routines[i].fn, len, true);
+                               info->do_cycle(&info->routines[i], len, true);
                } else {
                        result_bps[0] =
-                               do_memcpy_gettimeofday(routines[i].fn,
+                               info->do_gettimeofday(&info->routines[i],
                                                len, false);
                        result_bps[1] =
-                               do_memcpy_gettimeofday(routines[i].fn,
+                               info->do_gettimeofday(&info->routines[i],
                                                len, true);
                }
        } else {
                if (use_cycle) {
                        result_cycle[pf] =
-                               do_memcpy_cycle(routines[i].fn,
+                               info->do_cycle(&info->routines[i],
                                                len, only_prefault);
                } else {
                        result_bps[pf] =
-                               do_memcpy_gettimeofday(routines[i].fn,
+                               info->do_gettimeofday(&info->routines[i],
                                                len, only_prefault);
                }
        }
@@ -263,10 +221,10 @@ int bench_mem_memcpy(int argc, const char **argv,
                        if (use_cycle) {
                                printf(" %14lf Cycle/Byte\n",
                                        (double)result_cycle[0]
-                                       / (double)len);
+                                       / totallen);
                                printf(" %14lf Cycle/Byte (with prefault)\n",
                                        (double)result_cycle[1]
-                                       / (double)len);
+                                       / totallen);
                        } else {
                                print_bps(result_bps[0]);
                                printf("\n");
@@ -277,7 +235,7 @@ int bench_mem_memcpy(int argc, const char **argv,
                        if (use_cycle) {
                                printf(" %14lf Cycle/Byte",
                                        (double)result_cycle[pf]
-                                       / (double)len);
+                                       / totallen);
                        } else
                                print_bps(result_bps[pf]);
 
@@ -288,8 +246,8 @@ int bench_mem_memcpy(int argc, const char **argv,
                if (!only_prefault && !no_prefault) {
                        if (use_cycle) {
                                printf("%lf %lf\n",
-                                       (double)result_cycle[0] / (double)len,
-                                       (double)result_cycle[1] / (double)len);
+                                       (double)result_cycle[0] / totallen,
+                                       (double)result_cycle[1] / totallen);
                        } else {
                                printf("%lf %lf\n",
                                        result_bps[0], result_bps[1]);
@@ -297,7 +255,7 @@ int bench_mem_memcpy(int argc, const char **argv,
                } else {
                        if (use_cycle) {
                                printf("%lf\n", (double)result_cycle[pf]
-                                       / (double)len);
+                                       / totallen);
                        } else
                                printf("%lf\n", result_bps[pf]);
                }
@@ -310,3 +268,163 @@ int bench_mem_memcpy(int argc, const char **argv,
 
        return 0;
 }
+
+static void memcpy_alloc_mem(void **dst, void **src, size_t length)
+{
+       *dst = zalloc(length);
+       if (!*dst)
+               die("memory allocation failed - maybe length is too large?\n");
+
+       *src = zalloc(length);
+       if (!*src)
+               die("memory allocation failed - maybe length is too large?\n");
+       /* Make sure to always replace the zero pages even if MMAP_THRESH is crossed */
+       memset(*src, 0, length);
+}
+
+static u64 do_memcpy_cycle(const struct routine *r, size_t len, bool prefault)
+{
+       u64 cycle_start = 0ULL, cycle_end = 0ULL;
+       void *src = NULL, *dst = NULL;
+       memcpy_t fn = r->fn.memcpy;
+       int i;
+
+       memcpy_alloc_mem(&src, &dst, len);
+
+       if (prefault)
+               fn(dst, src, len);
+
+       cycle_start = get_cycle();
+       for (i = 0; i < iterations; ++i)
+               fn(dst, src, len);
+       cycle_end = get_cycle();
+
+       free(src);
+       free(dst);
+       return cycle_end - cycle_start;
+}
+
+static double do_memcpy_gettimeofday(const struct routine *r, size_t len,
+                                    bool prefault)
+{
+       struct timeval tv_start, tv_end, tv_diff;
+       memcpy_t fn = r->fn.memcpy;
+       void *src = NULL, *dst = NULL;
+       int i;
+
+       memcpy_alloc_mem(&src, &dst, len);
+
+       if (prefault)
+               fn(dst, src, len);
+
+       BUG_ON(gettimeofday(&tv_start, NULL));
+       for (i = 0; i < iterations; ++i)
+               fn(dst, src, len);
+       BUG_ON(gettimeofday(&tv_end, NULL));
+
+       timersub(&tv_end, &tv_start, &tv_diff);
+
+       free(src);
+       free(dst);
+       return (double)(((double)len * iterations) / timeval2double(&tv_diff));
+}
+
+int bench_mem_memcpy(int argc, const char **argv,
+                    const char *prefix __maybe_unused)
+{
+       struct bench_mem_info info = {
+               .routines = memcpy_routines,
+               .do_cycle = do_memcpy_cycle,
+               .do_gettimeofday = do_memcpy_gettimeofday,
+               .usage = bench_mem_memcpy_usage,
+       };
+
+       return bench_mem_common(argc, argv, prefix, &info);
+}
+
+static void memset_alloc_mem(void **dst, size_t length)
+{
+       *dst = zalloc(length);
+       if (!*dst)
+               die("memory allocation failed - maybe length is too large?\n");
+}
+
+static u64 do_memset_cycle(const struct routine *r, size_t len, bool prefault)
+{
+       u64 cycle_start = 0ULL, cycle_end = 0ULL;
+       memset_t fn = r->fn.memset;
+       void *dst = NULL;
+       int i;
+
+       memset_alloc_mem(&dst, len);
+
+       if (prefault)
+               fn(dst, -1, len);
+
+       cycle_start = get_cycle();
+       for (i = 0; i < iterations; ++i)
+               fn(dst, i, len);
+       cycle_end = get_cycle();
+
+       free(dst);
+       return cycle_end - cycle_start;
+}
+
+static double do_memset_gettimeofday(const struct routine *r, size_t len,
+                                    bool prefault)
+{
+       struct timeval tv_start, tv_end, tv_diff;
+       memset_t fn = r->fn.memset;
+       void *dst = NULL;
+       int i;
+
+       memset_alloc_mem(&dst, len);
+
+       if (prefault)
+               fn(dst, -1, len);
+
+       BUG_ON(gettimeofday(&tv_start, NULL));
+       for (i = 0; i < iterations; ++i)
+               fn(dst, i, len);
+       BUG_ON(gettimeofday(&tv_end, NULL));
+
+       timersub(&tv_end, &tv_start, &tv_diff);
+
+       free(dst);
+       return (double)(((double)len * iterations) / timeval2double(&tv_diff));
+}
+
+static const char * const bench_mem_memset_usage[] = {
+       "perf bench mem memset <options>",
+       NULL
+};
+
+static const struct routine memset_routines[] = {
+       { .name ="default",
+         .desc = "Default memset() provided by glibc",
+         .fn.memset = memset },
+#ifdef HAVE_ARCH_X86_64_SUPPORT
+
+#define MEMSET_FN(_fn, _name, _desc) { .name = _name, .desc = _desc, .fn.memset = _fn },
+#include "mem-memset-x86-64-asm-def.h"
+#undef MEMSET_FN
+
+#endif
+
+       { .name = NULL,
+         .desc = NULL,
+         .fn.memset = NULL   }
+};
+
+int bench_mem_memset(int argc, const char **argv,
+                    const char *prefix __maybe_unused)
+{
+       struct bench_mem_info info = {
+               .routines = memset_routines,
+               .do_cycle = do_memset_cycle,
+               .do_gettimeofday = do_memset_gettimeofday,
+               .usage = bench_mem_memset_usage,
+       };
+
+       return bench_mem_common(argc, argv, prefix, &info);
+}
diff --git a/tools/perf/bench/mem-memset.c b/tools/perf/bench/mem-memset.c
deleted file mode 100644 (file)
index 75fc3e6..0000000
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * mem-memset.c
- *
- * memset: Simple memory set in various ways
- *
- * Trivial clone of mem-memcpy.c.
- */
-
-#include "../perf.h"
-#include "../util/util.h"
-#include "../util/parse-options.h"
-#include "../util/header.h"
-#include "../util/cloexec.h"
-#include "bench.h"
-#include "mem-memset-arch.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/time.h>
-#include <errno.h>
-
-#define K 1024
-
-static const char      *length_str     = "1MB";
-static const char      *routine        = "default";
-static int             iterations      = 1;
-static bool            use_cycle;
-static int             cycle_fd;
-static bool            only_prefault;
-static bool            no_prefault;
-
-static const struct option options[] = {
-       OPT_STRING('l', "length", &length_str, "1MB",
-                   "Specify length of memory to set. "
-                   "Available units: B, KB, MB, GB and TB (upper and lower)"),
-       OPT_STRING('r', "routine", &routine, "default",
-                   "Specify routine to set"),
-       OPT_INTEGER('i', "iterations", &iterations,
-                   "repeat memset() invocation this number of times"),
-       OPT_BOOLEAN('c', "cycle", &use_cycle,
-                   "Use cycles event instead of gettimeofday() for measuring"),
-       OPT_BOOLEAN('o', "only-prefault", &only_prefault,
-                   "Show only the result with page faults before memset()"),
-       OPT_BOOLEAN('n', "no-prefault", &no_prefault,
-                   "Show only the result without page faults before memset()"),
-       OPT_END()
-};
-
-typedef void *(*memset_t)(void *, int, size_t);
-
-struct routine {
-       const char *name;
-       const char *desc;
-       memset_t fn;
-};
-
-static const struct routine routines[] = {
-       { "default",
-         "Default memset() provided by glibc",
-         memset },
-#ifdef HAVE_ARCH_X86_64_SUPPORT
-
-#define MEMSET_FN(fn, name, desc) { name, desc, fn },
-#include "mem-memset-x86-64-asm-def.h"
-#undef MEMSET_FN
-
-#endif
-
-       { NULL,
-         NULL,
-         NULL   }
-};
-
-static const char * const bench_mem_memset_usage[] = {
-       "perf bench mem memset <options>",
-       NULL
-};
-
-static struct perf_event_attr cycle_attr = {
-       .type           = PERF_TYPE_HARDWARE,
-       .config         = PERF_COUNT_HW_CPU_CYCLES
-};
-
-static void init_cycle(void)
-{
-       cycle_fd = sys_perf_event_open(&cycle_attr, getpid(), -1, -1,
-                                      perf_event_open_cloexec_flag());
-
-       if (cycle_fd < 0 && errno == ENOSYS)
-               die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
-       else
-               BUG_ON(cycle_fd < 0);
-}
-
-static u64 get_cycle(void)
-{
-       int ret;
-       u64 clk;
-
-       ret = read(cycle_fd, &clk, sizeof(u64));
-       BUG_ON(ret != sizeof(u64));
-
-       return clk;
-}
-
-static double timeval2double(struct timeval *ts)
-{
-       return (double)ts->tv_sec +
-               (double)ts->tv_usec / (double)1000000;
-}
-
-static void alloc_mem(void **dst, size_t length)
-{
-       *dst = zalloc(length);
-       if (!*dst)
-               die("memory allocation failed - maybe length is too large?\n");
-}
-
-static u64 do_memset_cycle(memset_t fn, size_t len, bool prefault)
-{
-       u64 cycle_start = 0ULL, cycle_end = 0ULL;
-       void *dst = NULL;
-       int i;
-
-       alloc_mem(&dst, len);
-
-       if (prefault)
-               fn(dst, -1, len);
-
-       cycle_start = get_cycle();
-       for (i = 0; i < iterations; ++i)
-               fn(dst, i, len);
-       cycle_end = get_cycle();
-
-       free(dst);
-       return cycle_end - cycle_start;
-}
-
-static double do_memset_gettimeofday(memset_t fn, size_t len, bool prefault)
-{
-       struct timeval tv_start, tv_end, tv_diff;
-       void *dst = NULL;
-       int i;
-
-       alloc_mem(&dst, len);
-
-       if (prefault)
-               fn(dst, -1, len);
-
-       BUG_ON(gettimeofday(&tv_start, NULL));
-       for (i = 0; i < iterations; ++i)
-               fn(dst, i, len);
-       BUG_ON(gettimeofday(&tv_end, NULL));
-
-       timersub(&tv_end, &tv_start, &tv_diff);
-
-       free(dst);
-       return (double)((double)len / timeval2double(&tv_diff));
-}
-
-#define pf (no_prefault ? 0 : 1)
-
-#define print_bps(x) do {                                      \
-               if (x < K)                                      \
-                       printf(" %14lf B/Sec", x);              \
-               else if (x < K * K)                             \
-                       printf(" %14lfd KB/Sec", x / K);        \
-               else if (x < K * K * K)                         \
-                       printf(" %14lf MB/Sec", x / K / K);     \
-               else                                            \
-                       printf(" %14lf GB/Sec", x / K / K / K); \
-       } while (0)
-
-int bench_mem_memset(int argc, const char **argv,
-                    const char *prefix __maybe_unused)
-{
-       int i;
-       size_t len;
-       double result_bps[2];
-       u64 result_cycle[2];
-
-       argc = parse_options(argc, argv, options,
-                            bench_mem_memset_usage, 0);
-
-       if (no_prefault && only_prefault) {
-               fprintf(stderr, "Invalid options: -o and -n are mutually exclusive\n");
-               return 1;
-       }
-
-       if (use_cycle)
-               init_cycle();
-
-       len = (size_t)perf_atoll((char *)length_str);
-
-       result_cycle[0] = result_cycle[1] = 0ULL;
-       result_bps[0] = result_bps[1] = 0.0;
-
-       if ((s64)len <= 0) {
-               fprintf(stderr, "Invalid length:%s\n", length_str);
-               return 1;
-       }
-
-       /* same to without specifying either of prefault and no-prefault */
-       if (only_prefault && no_prefault)
-               only_prefault = no_prefault = false;
-
-       for (i = 0; routines[i].name; i++) {
-               if (!strcmp(routines[i].name, routine))
-                       break;
-       }
-       if (!routines[i].name) {
-               printf("Unknown routine:%s\n", routine);
-               printf("Available routines...\n");
-               for (i = 0; routines[i].name; i++) {
-                       printf("\t%s ... %s\n",
-                              routines[i].name, routines[i].desc);
-               }
-               return 1;
-       }
-
-       if (bench_format == BENCH_FORMAT_DEFAULT)
-               printf("# Copying %s Bytes ...\n\n", length_str);
-
-       if (!only_prefault && !no_prefault) {
-               /* show both of results */
-               if (use_cycle) {
-                       result_cycle[0] =
-                               do_memset_cycle(routines[i].fn, len, false);
-                       result_cycle[1] =
-                               do_memset_cycle(routines[i].fn, len, true);
-               } else {
-                       result_bps[0] =
-                               do_memset_gettimeofday(routines[i].fn,
-                                               len, false);
-                       result_bps[1] =
-                               do_memset_gettimeofday(routines[i].fn,
-                                               len, true);
-               }
-       } else {
-               if (use_cycle) {
-                       result_cycle[pf] =
-                               do_memset_cycle(routines[i].fn,
-                                               len, only_prefault);
-               } else {
-                       result_bps[pf] =
-                               do_memset_gettimeofday(routines[i].fn,
-                                               len, only_prefault);
-               }
-       }
-
-       switch (bench_format) {
-       case BENCH_FORMAT_DEFAULT:
-               if (!only_prefault && !no_prefault) {
-                       if (use_cycle) {
-                               printf(" %14lf Cycle/Byte\n",
-                                       (double)result_cycle[0]
-                                       / (double)len);
-                               printf(" %14lf Cycle/Byte (with prefault)\n ",
-                                       (double)result_cycle[1]
-                                       / (double)len);
-                       } else {
-                               print_bps(result_bps[0]);
-                               printf("\n");
-                               print_bps(result_bps[1]);
-                               printf(" (with prefault)\n");
-                       }
-               } else {
-                       if (use_cycle) {
-                               printf(" %14lf Cycle/Byte",
-                                       (double)result_cycle[pf]
-                                       / (double)len);
-                       } else
-                               print_bps(result_bps[pf]);
-
-                       printf("%s\n", only_prefault ? " (with prefault)" : "");
-               }
-               break;
-       case BENCH_FORMAT_SIMPLE:
-               if (!only_prefault && !no_prefault) {
-                       if (use_cycle) {
-                               printf("%lf %lf\n",
-                                       (double)result_cycle[0] / (double)len,
-                                       (double)result_cycle[1] / (double)len);
-                       } else {
-                               printf("%lf %lf\n",
-                                       result_bps[0], result_bps[1]);
-                       }
-               } else {
-                       if (use_cycle) {
-                               printf("%lf\n", (double)result_cycle[pf]
-                                       / (double)len);
-                       } else
-                               printf("%lf\n", result_bps[pf]);
-               }
-               break;
-       default:
-               /* reaching this means there's some disaster: */
-               die("unknown format: %d\n", bench_format);
-               break;
-       }
-
-       return 0;
-}
index 70385756da63f12bcfafe362bc55b1f157e8feae..77d5cae54c6ac3dbed34267fd542303b4253079c 100644 (file)
@@ -285,12 +285,11 @@ int cmd_buildid_cache(int argc, const char **argv,
        struct str_node *pos;
        int ret = 0;
        bool force = false;
-       char debugdir[PATH_MAX];
        char const *add_name_list_str = NULL,
                   *remove_name_list_str = NULL,
                   *missing_filename = NULL,
                   *update_name_list_str = NULL,
-                  *kcore_filename;
+                  *kcore_filename = NULL;
        char sbuf[STRERR_BUFSIZE];
 
        struct perf_data_file file = {
@@ -335,13 +334,11 @@ int cmd_buildid_cache(int argc, const char **argv,
 
        setup_pager();
 
-       snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
-
        if (add_name_list_str) {
                list = strlist__new(true, add_name_list_str);
                if (list) {
                        strlist__for_each(pos, list)
-                               if (build_id_cache__add_file(pos->s, debugdir)) {
+                               if (build_id_cache__add_file(pos->s, buildid_dir)) {
                                        if (errno == EEXIST) {
                                                pr_debug("%s already in the cache\n",
                                                         pos->s);
@@ -359,7 +356,7 @@ int cmd_buildid_cache(int argc, const char **argv,
                list = strlist__new(true, remove_name_list_str);
                if (list) {
                        strlist__for_each(pos, list)
-                               if (build_id_cache__remove_file(pos->s, debugdir)) {
+                               if (build_id_cache__remove_file(pos->s, buildid_dir)) {
                                        if (errno == ENOENT) {
                                                pr_debug("%s wasn't in the cache\n",
                                                         pos->s);
@@ -380,7 +377,7 @@ int cmd_buildid_cache(int argc, const char **argv,
                list = strlist__new(true, update_name_list_str);
                if (list) {
                        strlist__for_each(pos, list)
-                               if (build_id_cache__update_file(pos->s, debugdir)) {
+                               if (build_id_cache__update_file(pos->s, buildid_dir)) {
                                        if (errno == ENOENT) {
                                                pr_debug("%s wasn't in the cache\n",
                                                         pos->s);
@@ -395,7 +392,7 @@ int cmd_buildid_cache(int argc, const char **argv,
        }
 
        if (kcore_filename &&
-           build_id_cache__add_kcore(kcore_filename, debugdir, force))
+           build_id_cache__add_kcore(kcore_filename, buildid_dir, force))
                pr_warning("Couldn't add %s\n", kcore_filename);
 
 out:
index 3c0f3d4fb021b4b0b6730c23e764d931d23faab2..0894a817f67e6f5bb8a5d6398c9e2307f010efc6 100644 (file)
@@ -1293,7 +1293,8 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
                OPT_UINTEGER('d', "display", &kvm->display_time,
                        "time in seconds between display updates"),
                OPT_STRING(0, "event", &kvm->report_event, "report event",
-                       "event for reporting: vmexit, mmio, ioport"),
+                       "event for reporting: "
+                       "vmexit, mmio (x86 only), ioport (x86 only)"),
                OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
                        "vcpu id to report"),
                OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
index 83a4835c8118c5d4fa45aa9f4010e6e4cf33e45d..badfabc6a01f6f1a90d1988abc5ac6178021bb08 100644 (file)
@@ -2045,7 +2045,6 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
        unsigned long before;
        const bool forks = argc > 0;
        bool draining = false;
-       char sbuf[STRERR_BUFSIZE];
 
        trace->live = true;
 
@@ -2106,11 +2105,8 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
                goto out_error_open;
 
        err = perf_evlist__mmap(evlist, trace->opts.mmap_pages, false);
-       if (err < 0) {
-               fprintf(trace->output, "Couldn't mmap the events: %s\n",
-                       strerror_r(errno, sbuf, sizeof(sbuf)));
-               goto out_delete_evlist;
-       }
+       if (err < 0)
+               goto out_error_mmap;
 
        perf_evlist__enable(evlist);
 
@@ -2210,6 +2206,10 @@ out_error_tp:
        perf_evlist__strerror_tp(evlist, errno, errbuf, sizeof(errbuf));
        goto out_error;
 
+out_error_mmap:
+       perf_evlist__strerror_mmap(evlist, errno, errbuf, sizeof(errbuf));
+       goto out_error;
+
 out_error_open:
        perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
 
@@ -2485,7 +2485,7 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
                        .user_freq     = UINT_MAX,
                        .user_interval = ULLONG_MAX,
                        .no_buffering  = true,
-                       .mmap_pages    = 1024,
+                       .mmap_pages    = UINT_MAX,
                },
                .output = stdout,
                .show_comm = true,
index 452a8474d29d8cedbed3e47f46a4ebc728663907..3700a7faca6cf4df09be10ebc0b98376b10e84b6 100644 (file)
@@ -200,6 +200,16 @@ static int handle_options(const char ***argv, int *argc, int *envchanged)
                                *envchanged = 1;
                        (*argv)++;
                        (*argc)--;
+               } else if (!strcmp(cmd, "--buildid-dir")) {
+                       if (*argc < 2) {
+                               fprintf(stderr, "No directory given for --buildid-dir.\n");
+                               usage(perf_usage_string);
+                       }
+                       set_buildid_dir((*argv)[1]);
+                       if (envchanged)
+                               *envchanged = 1;
+                       (*argv)++;
+                       (*argc)--;
                } else if (!prefixcmp(cmd, CMD_DEBUGFS_DIR)) {
                        perf_debugfs_set_path(cmd + strlen(CMD_DEBUGFS_DIR));
                        fprintf(stderr, "dir: %s\n", debugfs_mountpoint);
@@ -499,7 +509,7 @@ int main(int argc, const char **argv)
        }
        if (!prefixcmp(cmd, "trace")) {
 #ifdef HAVE_LIBAUDIT_SUPPORT
-               set_buildid_dir();
+               set_buildid_dir(NULL);
                setup_path();
                argv[0] = "trace";
                return cmd_trace(argc, argv, NULL);
@@ -514,7 +524,7 @@ int main(int argc, const char **argv)
        argc--;
        handle_options(&argv, &argc, NULL);
        commit_pager_choice();
-       set_buildid_dir();
+       set_buildid_dir(NULL);
 
        if (argc > 0) {
                if (!prefixcmp(argv[0], "--"))
index f710b92ccff6774ef93bfdb2117b492339098ff8..d3095dafed36d23d4e5c8bf1357eff447c187894 100644 (file)
@@ -5,7 +5,7 @@ group_fd=-1
 flags=0|8
 cpu=*
 type=0|1
-size=96
+size=104
 config=0
 sample_period=4000
 sample_type=263
index dc3ada2470c01cd223e45a379b485af64b2e196d..872ed7e24c7c5e7bd3f2f7d0da3f42302ef8aae6 100644 (file)
@@ -5,7 +5,7 @@ group_fd=-1
 flags=0|8
 cpu=*
 type=0
-size=96
+size=104
 config=0
 sample_period=0
 sample_type=0
index 502daff76ceba70be29243aecc13bab3040c0a20..e6bb04b5b09b863013e4d361120269d59f6207c6 100644 (file)
@@ -1252,7 +1252,7 @@ static int hists__browser_title(struct hists *hists,
 
        nr_samples = convert_unit(nr_samples, &unit);
        printed = scnprintf(bf, size,
-                          "Samples: %lu%c of event '%s', Event count (approx.): %lu",
+                          "Samples: %lu%c of event '%s', Event count (approx.): %" PRIu64,
                           nr_samples, unit, ev_name, nr_events);
 
 
index 2af18376b0772ea5094e1f6277afa343533a484c..dc0d095f318c7da2868352d5a4c048a5dde40251 100644 (file)
@@ -162,8 +162,8 @@ static int __hpp__sort(struct hist_entry *a, struct hist_entry *b,
                return ret;
 
        nr_members = evsel->nr_members;
-       fields_a = calloc(sizeof(*fields_a), nr_members);
-       fields_b = calloc(sizeof(*fields_b), nr_members);
+       fields_a = calloc(nr_members, sizeof(*fields_a));
+       fields_b = calloc(nr_members, sizeof(*fields_b));
 
        if (!fields_a || !fields_b)
                goto out;
index e8d79e5bfaf75c921f156752a6d14829b76ac872..0c72680a977fb8a5dcf01384720e295384bb9648 100644 (file)
@@ -410,21 +410,18 @@ int perf_session__cache_build_ids(struct perf_session *session)
 {
        struct rb_node *nd;
        int ret;
-       char debugdir[PATH_MAX];
 
        if (no_buildid_cache)
                return 0;
 
-       snprintf(debugdir, sizeof(debugdir), "%s", buildid_dir);
-
-       if (mkdir(debugdir, 0755) != 0 && errno != EEXIST)
+       if (mkdir(buildid_dir, 0755) != 0 && errno != EEXIST)
                return -1;
 
-       ret = machine__cache_build_ids(&session->machines.host, debugdir);
+       ret = machine__cache_build_ids(&session->machines.host, buildid_dir);
 
        for (nd = rb_first(&session->machines.guests); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
-               ret |= machine__cache_build_ids(pos, debugdir);
+               ret |= machine__cache_build_ids(pos, buildid_dir);
        }
        return ret ? -1 : 0;
 }
index cf524a35cc841a9784a77b3da4794409130283c9..64b377e591e457746138173cfa59533f887e3d56 100644 (file)
@@ -77,7 +77,7 @@ int parse_callchain_record_opt(const char *arg)
                                ret = 0;
                        } else
                                pr_err("callchain: No more arguments "
-                                      "needed for -g fp\n");
+                                      "needed for --call-graph fp\n");
                        break;
 
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
index 57ff826f150bfe4db2f89814a1e2a77be54aebfa..e18f653cd7db80f907e1a7ae00ccf6e4d6438988 100644 (file)
@@ -522,7 +522,7 @@ static int buildid_dir_command_config(const char *var, const char *value,
        const char *v;
 
        /* same dir for all commands */
-       if (!prefixcmp(var, "buildid.") && !strcmp(var + 8, "dir")) {
+       if (!strcmp(var, "buildid.dir")) {
                v = perf_config_dirname(var, value);
                if (!v)
                        return -1;
@@ -539,12 +539,14 @@ static void check_buildid_dir_config(void)
        perf_config(buildid_dir_command_config, &c);
 }
 
-void set_buildid_dir(void)
+void set_buildid_dir(const char *dir)
 {
-       buildid_dir[0] = '\0';
+       if (dir)
+               scnprintf(buildid_dir, MAXPATHLEN-1, "%s", dir);
 
        /* try config file */
-       check_buildid_dir_config();
+       if (buildid_dir[0] == '\0')
+               check_buildid_dir_config();
 
        /* default to $HOME/.debug */
        if (buildid_dir[0] == '\0') {
index cfbe2b99b9aa5826b60fc574a211a115fc6f6a2d..cbab1fb77b1d6c4efb8565144256e6f1c5a6d540 100644 (file)
@@ -8,6 +8,7 @@
  */
 #include "util.h"
 #include <api/fs/debugfs.h>
+#include <api/fs/fs.h>
 #include <poll.h>
 #include "cpumap.h"
 #include "thread_map.h"
@@ -24,6 +25,7 @@
 
 #include <linux/bitops.h>
 #include <linux/hash.h>
+#include <linux/log2.h>
 
 static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
@@ -892,10 +894,24 @@ out_unmap:
 
 static size_t perf_evlist__mmap_size(unsigned long pages)
 {
-       /* 512 kiB: default amount of unprivileged mlocked memory */
-       if (pages == UINT_MAX)
-               pages = (512 * 1024) / page_size;
-       else if (!is_power_of_2(pages))
+       if (pages == UINT_MAX) {
+               int max;
+
+               if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
+                       /*
+                        * Pick a once upon a time good value, i.e. things look
+                        * strange since we can't read a sysctl value, but lets not
+                        * die yet...
+                        */
+                       max = 512;
+               } else {
+                       max -= (page_size / 1024);
+               }
+
+               pages = (max * 1024) / page_size;
+               if (!is_power_of_2(pages))
+                       pages = rounddown_pow_of_two(pages);
+       } else if (!is_power_of_2(pages))
                return 0;
 
        return (pages + 1) * page_size;
@@ -932,7 +948,7 @@ static long parse_pages_arg(const char *str, unsigned long min,
                /* leave number of pages at 0 */
        } else if (!is_power_of_2(pages)) {
                /* round pages up to next power of 2 */
-               pages = next_pow2_l(pages);
+               pages = roundup_pow_of_two(pages);
                if (!pages)
                        return -EINVAL;
                pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
@@ -1483,6 +1499,37 @@ int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
        return 0;
 }
 
+int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size)
+{
+       char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+       int pages_attempted = evlist->mmap_len / 1024, pages_max_per_user, printed = 0;
+
+       switch (err) {
+       case EPERM:
+               sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
+               printed += scnprintf(buf + printed, size - printed,
+                                    "Error:\t%s.\n"
+                                    "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
+                                    "Hint:\tTried using %zd kB.\n",
+                                    emsg, pages_max_per_user, pages_attempted);
+
+               if (pages_attempted >= pages_max_per_user) {
+                       printed += scnprintf(buf + printed, size - printed,
+                                            "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
+                                            pages_max_per_user + pages_attempted);
+               }
+
+               printed += scnprintf(buf + printed, size - printed,
+                                    "Hint:\tTry using a smaller -m/--mmap-pages value.");
+               break;
+       default:
+               scnprintf(buf, size, "%s", emsg);
+               break;
+       }
+
+       return 0;
+}
+
 void perf_evlist__to_front(struct perf_evlist *evlist,
                           struct perf_evsel *move_evsel)
 {
index 649b0c597283427572ffb2e43cf0521cb4a971fb..0ba93f67ab946839fb3576d6285cf08cafc4810c 100644 (file)
@@ -185,6 +185,7 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp);
 
 int perf_evlist__strerror_tp(struct perf_evlist *evlist, int err, char *buf, size_t size);
 int perf_evlist__strerror_open(struct perf_evlist *evlist, int err, char *buf, size_t size);
+int perf_evlist__strerror_mmap(struct perf_evlist *evlist, int err, char *buf, size_t size);
 
 static inline unsigned int perf_mmap__read_head(struct perf_mmap *mm)
 {
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
deleted file mode 100644 (file)
index c329416..0000000
+++ /dev/null
@@ -1,162 +0,0 @@
-#ifndef _PERF_LINUX_BITOPS_H_
-#define _PERF_LINUX_BITOPS_H_
-
-#include <linux/kernel.h>
-#include <linux/compiler.h>
-#include <asm/hweight.h>
-
-#ifndef __WORDSIZE
-#define __WORDSIZE (__SIZEOF_LONG__ * 8)
-#endif
-
-#define BITS_PER_LONG __WORDSIZE
-#define BITS_PER_BYTE           8
-#define BITS_TO_LONGS(nr)       DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#define BITS_TO_U64(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
-#define BITS_TO_U32(nr)         DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
-#define BITS_TO_BYTES(nr)       DIV_ROUND_UP(nr, BITS_PER_BYTE)
-#define BIT_WORD(nr)            ((nr) / BITS_PER_LONG)
-#define BIT_MASK(nr)            (1UL << ((nr) % BITS_PER_LONG))
-
-#define for_each_set_bit(bit, addr, size) \
-       for ((bit) = find_first_bit((addr), (size));            \
-            (bit) < (size);                                    \
-            (bit) = find_next_bit((addr), (size), (bit) + 1))
-
-/* same as for_each_set_bit() but use bit as value to start with */
-#define for_each_set_bit_from(bit, addr, size) \
-       for ((bit) = find_next_bit((addr), (size), (bit));      \
-            (bit) < (size);                                    \
-            (bit) = find_next_bit((addr), (size), (bit) + 1))
-
-static inline void set_bit(int nr, unsigned long *addr)
-{
-       addr[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
-}
-
-static inline void clear_bit(int nr, unsigned long *addr)
-{
-       addr[nr / BITS_PER_LONG] &= ~(1UL << (nr % BITS_PER_LONG));
-}
-
-static __always_inline int test_bit(unsigned int nr, const unsigned long *addr)
-{
-       return ((1UL << (nr % BITS_PER_LONG)) &
-               (((unsigned long *)addr)[nr / BITS_PER_LONG])) != 0;
-}
-
-static inline unsigned long hweight_long(unsigned long w)
-{
-       return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
-}
-
-#define BITOP_WORD(nr)         ((nr) / BITS_PER_LONG)
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static __always_inline unsigned long __ffs(unsigned long word)
-{
-       int num = 0;
-
-#if BITS_PER_LONG == 64
-       if ((word & 0xffffffff) == 0) {
-               num += 32;
-               word >>= 32;
-       }
-#endif
-       if ((word & 0xffff) == 0) {
-               num += 16;
-               word >>= 16;
-       }
-       if ((word & 0xff) == 0) {
-               num += 8;
-               word >>= 8;
-       }
-       if ((word & 0xf) == 0) {
-               num += 4;
-               word >>= 4;
-       }
-       if ((word & 0x3) == 0) {
-               num += 2;
-               word >>= 2;
-       }
-       if ((word & 0x1) == 0)
-               num += 1;
-       return num;
-}
-
-typedef const unsigned long __attribute__((__may_alias__)) long_alias_t;
-
-/*
- * Find the first set bit in a memory region.
- */
-static inline unsigned long
-find_first_bit(const unsigned long *addr, unsigned long size)
-{
-       long_alias_t *p = (long_alias_t *) addr;
-       unsigned long result = 0;
-       unsigned long tmp;
-
-       while (size & ~(BITS_PER_LONG-1)) {
-               if ((tmp = *(p++)))
-                       goto found;
-               result += BITS_PER_LONG;
-               size -= BITS_PER_LONG;
-       }
-       if (!size)
-               return result;
-
-       tmp = (*p) & (~0UL >> (BITS_PER_LONG - size));
-       if (tmp == 0UL)         /* Are any bits set? */
-               return result + size;   /* Nope. */
-found:
-       return result + __ffs(tmp);
-}
-
-/*
- * Find the next set bit in a memory region.
- */
-static inline unsigned long
-find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset)
-{
-       const unsigned long *p = addr + BITOP_WORD(offset);
-       unsigned long result = offset & ~(BITS_PER_LONG-1);
-       unsigned long tmp;
-
-       if (offset >= size)
-               return size;
-       size -= result;
-       offset %= BITS_PER_LONG;
-       if (offset) {
-               tmp = *(p++);
-               tmp &= (~0UL << offset);
-               if (size < BITS_PER_LONG)
-                       goto found_first;
-               if (tmp)
-                       goto found_middle;
-               size -= BITS_PER_LONG;
-               result += BITS_PER_LONG;
-       }
-       while (size & ~(BITS_PER_LONG-1)) {
-               if ((tmp = *(p++)))
-                       goto found_middle;
-               result += BITS_PER_LONG;
-               size -= BITS_PER_LONG;
-       }
-       if (!size)
-               return result;
-       tmp = *p;
-
-found_first:
-       tmp &= (~0UL >> (BITS_PER_LONG - size));
-       if (tmp == 0UL)         /* Are any bits set? */
-               return result + size;   /* Nope. */
-found_middle:
-       return result + __ffs(tmp);
-}
-
-#endif
index 15dd0a9691ceea7efe0d091b43aa0c46b191061d..94de3e48b4909a03a7e7037f779074e5bf31ff8a 100644 (file)
@@ -1385,19 +1385,46 @@ struct mem_info *sample__resolve_mem(struct perf_sample *sample,
 static int add_callchain_ip(struct thread *thread,
                            struct symbol **parent,
                            struct addr_location *root_al,
-                           int cpumode,
+                           bool branch_history,
                            u64 ip)
 {
        struct addr_location al;
 
        al.filtered = 0;
        al.sym = NULL;
-       if (cpumode == -1)
+       if (branch_history)
                thread__find_cpumode_addr_location(thread, MAP__FUNCTION,
                                                   ip, &al);
-       else
+       else {
+               u8 cpumode = PERF_RECORD_MISC_USER;
+
+               if (ip >= PERF_CONTEXT_MAX) {
+                       switch (ip) {
+                       case PERF_CONTEXT_HV:
+                               cpumode = PERF_RECORD_MISC_HYPERVISOR;
+                               break;
+                       case PERF_CONTEXT_KERNEL:
+                               cpumode = PERF_RECORD_MISC_KERNEL;
+                               break;
+                       case PERF_CONTEXT_USER:
+                               cpumode = PERF_RECORD_MISC_USER;
+                               break;
+                       default:
+                               pr_debug("invalid callchain context: "
+                                        "%"PRId64"\n", (s64) ip);
+                               /*
+                                * It seems the callchain is corrupted.
+                                * Discard all.
+                                */
+                               callchain_cursor_reset(&callchain_cursor);
+                               return 1;
+                       }
+                       return 0;
+               }
                thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
                                   ip, &al);
+       }
+
        if (al.sym != NULL) {
                if (sort__has_parent && !*parent &&
                    symbol__match_regex(al.sym, &parent_regex))
@@ -1480,11 +1507,8 @@ static int thread__resolve_callchain_sample(struct thread *thread,
                                             struct addr_location *root_al,
                                             int max_stack)
 {
-       u8 cpumode = PERF_RECORD_MISC_USER;
        int chain_nr = min(max_stack, (int)chain->nr);
-       int i;
-       int j;
-       int err;
+       int i, j, err;
        int skip_idx = -1;
        int first_call = 0;
 
@@ -1542,10 +1566,10 @@ static int thread__resolve_callchain_sample(struct thread *thread,
 
                for (i = 0; i < nr; i++) {
                        err = add_callchain_ip(thread, parent, root_al,
-                                              -1, be[i].to);
+                                              true, be[i].to);
                        if (!err)
                                err = add_callchain_ip(thread, parent, root_al,
-                                                      -1, be[i].from);
+                                                      true, be[i].from);
                        if (err == -EINVAL)
                                break;
                        if (err)
@@ -1574,36 +1598,10 @@ check_calls:
 #endif
                ip = chain->ips[j];
 
-               if (ip >= PERF_CONTEXT_MAX) {
-                       switch (ip) {
-                       case PERF_CONTEXT_HV:
-                               cpumode = PERF_RECORD_MISC_HYPERVISOR;
-                               break;
-                       case PERF_CONTEXT_KERNEL:
-                               cpumode = PERF_RECORD_MISC_KERNEL;
-                               break;
-                       case PERF_CONTEXT_USER:
-                               cpumode = PERF_RECORD_MISC_USER;
-                               break;
-                       default:
-                               pr_debug("invalid callchain context: "
-                                        "%"PRId64"\n", (s64) ip);
-                               /*
-                                * It seems the callchain is corrupted.
-                                * Discard all.
-                                */
-                               callchain_cursor_reset(&callchain_cursor);
-                               return 0;
-                       }
-                       continue;
-               }
+               err = add_callchain_ip(thread, parent, root_al, false, ip);
 
-               err = add_callchain_ip(thread, parent, root_al,
-                                      cpumode, ip);
-               if (err == -EINVAL)
-                       break;
                if (err)
-                       return err;
+                       return (err < 0) ? err : 0;
        }
 
        return 0;
index cf69325b985f15534fcb54303b98cfd5409d6e50..8acd0df88b5c4b75063d7ad2f83a55ef4f4f6286 100644 (file)
@@ -137,16 +137,7 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
 
 static int get_max_rate(unsigned int *rate)
 {
-       char path[PATH_MAX];
-       const char *procfs = procfs__mountpoint();
-
-       if (!procfs)
-               return -1;
-
-       snprintf(path, PATH_MAX,
-                "%s/sys/kernel/perf_event_max_sample_rate", procfs);
-
-       return filename__read_int(path, (int *) rate);
+       return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
 }
 
 static int record_opts__config_freq(struct record_opts *opts)
index e73b6a5c9e0fa8772862fdd3535701b67325c5dc..c93fb0c5bd0b197190f7fbe6af9cf1840d274c4d 100644 (file)
@@ -20,7 +20,7 @@
 
 struct a2l_data {
        const char      *input;
-       unsigned long   addr;
+       u64             addr;
 
        bool            found;
        const char      *filename;
@@ -147,7 +147,7 @@ static void addr2line_cleanup(struct a2l_data *a2l)
        free(a2l);
 }
 
-static int addr2line(const char *dso_name, unsigned long addr,
+static int addr2line(const char *dso_name, u64 addr,
                     char **file, unsigned int *line, struct dso *dso)
 {
        int ret = 0;
@@ -193,7 +193,7 @@ void dso__free_a2l(struct dso *dso)
 
 #else /* HAVE_LIBBFD_SUPPORT */
 
-static int addr2line(const char *dso_name, unsigned long addr,
+static int addr2line(const char *dso_name, u64 addr,
                     char **file, unsigned int *line_nr,
                     struct dso *dso __maybe_unused)
 {
@@ -252,7 +252,7 @@ void dso__free_a2l(struct dso *dso __maybe_unused)
  */
 #define A2L_FAIL_LIMIT 123
 
-char *get_srcline(struct dso *dso, unsigned long addr, struct symbol *sym,
+char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
                  bool show_sym)
 {
        char *file = NULL;
@@ -293,10 +293,10 @@ out:
                dso__free_a2l(dso);
        }
        if (sym) {
-               if (asprintf(&srcline, "%s+%ld", show_sym ? sym->name : "",
+               if (asprintf(&srcline, "%s+%" PRIu64, show_sym ? sym->name : "",
                                        addr - sym->start) < 0)
                        return SRCLINE_UNKNOWN;
-       } else if (asprintf(&srcline, "%s[%lx]", dso->short_name, addr) < 0)
+       } else if (asprintf(&srcline, "%s[%" PRIx64 "]", dso->short_name, addr) < 0)
                return SRCLINE_UNKNOWN;
        return srcline;
 }
index fa585c63f56adcf7dfb0afede3ee55cd5e37298e..d7efb03b3f9aab2d0382afb1d031790109462233 100644 (file)
@@ -129,6 +129,7 @@ int filename__read_build_id(const char *filename, void *bf, size_t size)
 
                for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
                        void *tmp;
+                       long offset;
 
                        if (need_swap) {
                                phdr->p_type = bswap_32(phdr->p_type);
@@ -140,12 +141,13 @@ int filename__read_build_id(const char *filename, void *bf, size_t size)
                                continue;
 
                        buf_size = phdr->p_filesz;
+                       offset = phdr->p_offset;
                        tmp = realloc(buf, buf_size);
                        if (tmp == NULL)
                                goto out_free;
 
                        buf = tmp;
-                       fseek(fp, phdr->p_offset, SEEK_SET);
+                       fseek(fp, offset, SEEK_SET);
                        if (fread(buf, buf_size, 1, fp) != 1)
                                goto out_free;
 
@@ -178,6 +180,7 @@ int filename__read_build_id(const char *filename, void *bf, size_t size)
 
                for (i = 0, phdr = buf; i < ehdr.e_phnum; i++, phdr++) {
                        void *tmp;
+                       long offset;
 
                        if (need_swap) {
                                phdr->p_type = bswap_32(phdr->p_type);
@@ -189,12 +192,13 @@ int filename__read_build_id(const char *filename, void *bf, size_t size)
                                continue;
 
                        buf_size = phdr->p_filesz;
+                       offset = phdr->p_offset;
                        tmp = realloc(buf, buf_size);
                        if (tmp == NULL)
                                goto out_free;
 
                        buf = tmp;
-                       fseek(fp, phdr->p_offset, SEEK_SET);
+                       fseek(fp, offset, SEEK_SET);
                        if (fread(buf, buf_size, 1, fp) != 1)
                                goto out_free;
 
index d5eab3f3323ff42e4ac133878248b091a359f835..b86744f29eeffce074d2c11fd11c5330ac72bfaa 100644 (file)
@@ -442,23 +442,6 @@ unsigned long parse_tag_value(const char *str, struct parse_tag *tags)
        return (unsigned long) -1;
 }
 
-int filename__read_int(const char *filename, int *value)
-{
-       char line[64];
-       int fd = open(filename, O_RDONLY), err = -1;
-
-       if (fd < 0)
-               return -1;
-
-       if (read(fd, line, sizeof(line)) > 0) {
-               *value = atoi(line);
-               err = 0;
-       }
-
-       close(fd);
-       return err;
-}
-
 int filename__read_str(const char *filename, char **buf, size_t *sizep)
 {
        size_t size = 0, alloc_size = 0;
@@ -523,16 +506,9 @@ const char *get_filename_for_perf_kvm(void)
 
 int perf_event_paranoid(void)
 {
-       char path[PATH_MAX];
-       const char *procfs = procfs__mountpoint();
        int value;
 
-       if (!procfs)
-               return INT_MAX;
-
-       scnprintf(path, PATH_MAX, "%s/sys/kernel/perf_event_paranoid", procfs);
-
-       if (filename__read_int(path, &value))
+       if (sysctl__read_int("kernel/perf_event_paranoid", &value))
                return INT_MAX;
 
        return value;
index 419bee030f835c295fee546a4211afaa19f84ace..027a5153495c8c9a087b7f624ba00d6e7f309a6f 100644 (file)
@@ -153,7 +153,7 @@ extern void warning(const char *err, ...) __attribute__((format (printf, 1, 2)))
 extern void set_die_routine(void (*routine)(const char *err, va_list params) NORETURN);
 
 extern int prefixcmp(const char *str, const char *prefix);
-extern void set_buildid_dir(void);
+extern void set_buildid_dir(const char *dir);
 
 static inline const char *skip_prefix(const char *str, const char *prefix)
 {
@@ -269,35 +269,6 @@ void event_attr_init(struct perf_event_attr *attr);
 #define _STR(x) #x
 #define STR(x) _STR(x)
 
-/*
- *  Determine whether some value is a power of two, where zero is
- * *not* considered a power of two.
- */
-
-static inline __attribute__((const))
-bool is_power_of_2(unsigned long n)
-{
-       return (n != 0 && ((n & (n - 1)) == 0));
-}
-
-static inline unsigned next_pow2(unsigned x)
-{
-       if (!x)
-               return 1;
-       return 1ULL << (32 - __builtin_clz(x - 1));
-}
-
-static inline unsigned long next_pow2_l(unsigned long x)
-{
-#if BITS_PER_LONG == 64
-       if (x <= (1UL << 31))
-               return next_pow2(x);
-       return (unsigned long)next_pow2(x >> 32) << 32;
-#else
-       return next_pow2(x);
-#endif
-}
-
 size_t hex_width(u64 v);
 int hex2u64(const char *ptr, u64 *val);
 
@@ -339,11 +310,10 @@ static inline int path__join3(char *bf, size_t size,
 struct dso;
 struct symbol;
 
-char *get_srcline(struct dso *dso, unsigned long addr, struct symbol *sym,
+char *get_srcline(struct dso *dso, u64 addr, struct symbol *sym,
                  bool show_sym);
 void free_srcline(char *srcline);
 
-int filename__read_int(const char *filename, int *value);
 int filename__read_str(const char *filename, char **buf, size_t *sizep);
 int perf_event_paranoid(void);
 
index 7cdcf88659c77d75196b3e078b89b657bf53a26a..9ea91437898598bf31982f6073ec483e3d51f687 100644 (file)
@@ -199,7 +199,7 @@ int main(int argc, const char *argv[])
        }
 
        get_cpu_info(0, &cpupower_cpu_info);
-       run_as_root = !getuid();
+       run_as_root = !geteuid();
        if (run_as_root) {
                ret = uname(&uts);
                if (!ret && !strcmp(uts.machine, "x86_64") &&
index 09afe5d87f2bbe34387e1257cfcc67bd5d16d4dc..4e8fe2c7b05475ca8e6d015dd20228c5a53b957f 100644 (file)
@@ -361,7 +361,7 @@ unsigned int sysfs_get_idlestate_count(unsigned int cpu)
 
        snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle");
        if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
-               return -ENODEV;
+               return 0;
 
        snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
        if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
index 33a5c06d95caa038f682c411ad26df0788d16f5b..d273624c93a642544f8ba2b31d45cebbc505dc52 100644 (file)
@@ -179,11 +179,11 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
         */
        fd = open(longpath, O_RDONLY);
        if (fd > 0) {
-               printf("Invoke copy of '%s' via filename of length %lu:\n",
+               printf("Invoke copy of '%s' via filename of length %zu:\n",
                        src, strlen(longpath));
                fail += check_execveat(fd, "", AT_EMPTY_PATH);
        } else {
-               printf("Failed to open length %lu filename, errno=%d (%s)\n",
+               printf("Failed to open length %zu filename, errno=%d (%s)\n",
                        strlen(longpath), errno, strerror(errno));
                fail++;
        }
index dfe454855cd2e7a2699fd6025ab40bacb1455447..1c12536f2081e89a5f112c39cf3aa4716bd7dc94 100644 (file)
@@ -446,7 +446,7 @@ int probe_thermal_sysfs(void)
                return -1;
        }
 
-       ptdata.tzi = calloc(sizeof(struct tz_info), ptdata.max_tz_instance+1);
+       ptdata.tzi = calloc(ptdata.max_tz_instance+1, sizeof(struct tz_info));
        if (!ptdata.tzi) {
                fprintf(stderr, "Err: allocate tz_info\n");
                return -1;
@@ -454,8 +454,8 @@ int probe_thermal_sysfs(void)
 
        /* we still show thermal zone information if there is no cdev */
        if (ptdata.nr_cooling_dev) {
-               ptdata.cdi = calloc(sizeof(struct cdev_info),
-                               ptdata.max_cdev_instance + 1);
+               ptdata.cdi = calloc(ptdata.max_cdev_instance + 1,
+                               sizeof(struct cdev_info));
                if (!ptdata.cdi) {
                        free(ptdata.tzi);
                        fprintf(stderr, "Err: allocate cdev_info\n");
index f5283438ee05e165b50b693c2d864f248d82a90d..1cc6e2e199827093093e6c48eab72c6531136a13 100644 (file)
@@ -671,6 +671,7 @@ static void update_memslots(struct kvm_memslots *slots,
 
        WARN_ON(mslots[i].id != id);
        if (!new->npages) {
+               WARN_ON(!mslots[i].npages);
                new->base_gfn = 0;
                if (mslots[i].npages)
                        slots->used_slots--;
@@ -687,12 +688,25 @@ static void update_memslots(struct kvm_memslots *slots,
                slots->id_to_index[mslots[i].id] = i;
                i++;
        }
-       while (i > 0 &&
-              new->base_gfn > mslots[i - 1].base_gfn) {
-               mslots[i] = mslots[i - 1];
-               slots->id_to_index[mslots[i].id] = i;
-               i--;
-       }
+
+       /*
+        * The ">=" is needed when creating a slot with base_gfn == 0,
+        * so that it moves before all those with base_gfn == npages == 0.
+        *
+        * On the other hand, if new->npages is zero, the above loop has
+        * already left i pointing to the beginning of the empty part of
+        * mslots, and the ">=" would move the hole backwards in this
+        * case---which is wrong.  So skip the loop when deleting a slot.
+        */
+       if (new->npages) {
+               while (i > 0 &&
+                      new->base_gfn >= mslots[i - 1].base_gfn) {
+                       mslots[i] = mslots[i - 1];
+                       slots->id_to_index[mslots[i].id] = i;
+                       i--;
+               }
+       } else
+               WARN_ON_ONCE(i != slots->used_slots);
 
        mslots[i] = *new;
        slots->id_to_index[mslots[i].id] = i;