Merge tag 'staging-3.15-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh...
author		Linus Torvalds <torvalds@linux-foundation.org>
		Mon, 5 May 2014 22:49:38 +0000 (15:49 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Mon, 5 May 2014 22:49:38 +0000 (15:49 -0700)
Pull staging / iio fixes from Greg KH:
 "Here are some small IIO driver fixes for 3.15-rc4 that resolve some
  reported issues"

* tag 'staging-3.15-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging:
  iio: adc: Nothing in ADC should be a bool CONFIG
  iio: exynos_adc: use indio_dev->dev structure to handle child nodes
  iio:imu:mpu6050: Fixed segfault in Invensens MPU driver due to null dereference
  staging:iio:ad2s1200 fix missing parenthesis in a for statment.

189 files changed:
Documentation/devicetree/bindings/arm/arch_timer.txt
Documentation/devicetree/bindings/ata/apm-xgene.txt
Documentation/devicetree/bindings/net/socfpga-dwmac.txt
Documentation/devicetree/bindings/net/stmmac.txt
Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
MAINTAINERS
Makefile
arch/arc/kernel/entry.S
arch/arm/boot/dts/am33xx.dtsi
arch/arm/boot/dts/am4372.dtsi
arch/arm/boot/dts/stih415-pinctrl.dtsi
arch/arm/boot/dts/stih416-pinctrl.dtsi
arch/arm/kvm/Kconfig
arch/arm/kvm/mmu.c
arch/arm64/boot/dts/apm-storm.dtsi
arch/arm64/kernel/early_printk.c
arch/arm64/kernel/setup.c
arch/arm64/mm/dma-mapping.c
arch/arm64/mm/mmu.c
arch/hexagon/include/asm/barrier.h [deleted file]
arch/parisc/include/uapi/asm/Kbuild
arch/parisc/include/uapi/asm/resource.h [deleted file]
arch/powerpc/boot/main.c
arch/powerpc/boot/ops.h
arch/powerpc/boot/ps3.c
arch/powerpc/include/asm/opal.h
arch/powerpc/include/uapi/asm/setup.h
arch/powerpc/kernel/ppc_ksyms.c
arch/powerpc/kernel/rtas_flash.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/mm/hash_native_64.c
arch/powerpc/perf/hv-24x7.c
arch/powerpc/perf/hv-gpci.c
arch/powerpc/platforms/powernv/opal-dump.c
arch/powerpc/platforms/powernv/opal-elog.c
arch/powerpc/platforms/powernv/opal-flash.c
arch/powerpc/platforms/powernv/opal-sysparam.c
arch/powerpc/platforms/powernv/opal.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/powernv/smp.c
arch/powerpc/platforms/pseries/hotplug-cpu.c
arch/powerpc/platforms/pseries/hotplug-memory.c
arch/powerpc/sysdev/ppc4xx_pci.c
arch/s390/net/bpf_jit_comp.c
arch/x86/Makefile
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/perf_event_intel_rapl.c
arch/x86/kernel/vsmp_64.c
arch/x86/kvm/vmx.c
arch/xtensa/Kconfig
arch/xtensa/boot/dts/kc705.dts [new file with mode: 0644]
arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi [new file with mode: 0644]
arch/xtensa/boot/dts/xtfpga-flash-16m.dtsi
arch/xtensa/boot/dts/xtfpga-flash-4m.dtsi
arch/xtensa/boot/dts/xtfpga.dtsi
arch/xtensa/include/asm/bootparam.h
arch/xtensa/include/asm/fixmap.h [new file with mode: 0644]
arch/xtensa/include/asm/highmem.h
arch/xtensa/include/asm/pgtable.h
arch/xtensa/include/asm/sysmem.h [new file with mode: 0644]
arch/xtensa/include/asm/tlbflush.h
arch/xtensa/kernel/setup.c
arch/xtensa/kernel/smp.c
arch/xtensa/kernel/xtensa_ksyms.c
arch/xtensa/mm/Makefile
arch/xtensa/mm/cache.c
arch/xtensa/mm/highmem.c [new file with mode: 0644]
arch/xtensa/mm/init.c
arch/xtensa/mm/mmu.c
arch/xtensa/mm/tlb.c
arch/xtensa/platforms/iss/Makefile
arch/xtensa/platforms/xt2000/setup.c
drivers/acpi/acpi_processor.c
drivers/acpi/ec.c
drivers/base/dd.c
drivers/base/platform.c
drivers/block/floppy.c
drivers/clk/versatile/clk-vexpress-osc.c
drivers/clocksource/arm_arch_timer.c
drivers/clocksource/zevio-timer.c
drivers/cpufreq/longhaul.c
drivers/cpufreq/powernow-k6.c
drivers/cpufreq/powernow-k7.c
drivers/cpufreq/ppc-corenet-cpufreq.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_dmabuf.c
drivers/gpu/drm/exynos/exynos_drm_dsi.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fbdev.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_irq.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_irq.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/tegra/dc.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/hwmon/coretemp.c
drivers/infiniband/hw/cxgb4/Kconfig
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
drivers/irqchip/irq-armada-370-xp.c
drivers/irqchip/irq-crossbar.c
drivers/md/dm-cache-target.c
drivers/md/dm-thin.c
drivers/md/dm-verity.c
drivers/mtd/ubi/block.c
drivers/mtd/ubi/wl.c
drivers/of/irq.c
drivers/of/platform.c
drivers/of/selftest.c
drivers/of/testcase-data/tests-interrupts.dtsi
drivers/pinctrl/pinctrl-as3722.c
drivers/pinctrl/pinctrl-single.c
drivers/pinctrl/pinctrl-tb10x.c
drivers/pinctrl/sh-pfc/pfc-r8a7790.c
drivers/pinctrl/sh-pfc/pfc-r8a7791.c
drivers/pnp/pnpacpi/core.c
drivers/pnp/quirks.c
drivers/s390/cio/chsc.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/virtio_scsi.c
fs/aio.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/inode.c
fs/ceph/ioctl.c
fs/ceph/locks.c
fs/ceph/super.h
fs/ubifs/super.c
include/asm-generic/fixmap.h
include/asm-generic/word-at-a-time.h
include/linux/ftrace.h
include/linux/interrupt.h
include/linux/irq.h
include/linux/of_irq.h
include/trace/events/module.h
kernel/hrtimer.c
kernel/irq/irqdesc.c
kernel/module.c
kernel/softirq.c
kernel/timer.c
kernel/trace/ftrace.c
kernel/trace/trace_events_trigger.c
mm/vmacache.c
net/ceph/osdmap.c
scripts/sortextable.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_priv.h
sound/pci/hda/patch_realtek.c
sound/soc/codecs/alc5623.c
sound/soc/codecs/cs42l52.c
sound/soc/codecs/cs42l73.c
sound/soc/codecs/tlv320aic3x.c
sound/soc/fsl/fsl_spdif.h
sound/soc/intel/sst-dsp-priv.h
sound/soc/intel/sst-haswell-ipc.c
sound/soc/jz4740/Makefile
sound/soc/sh/rcar/src.c
sound/soc/sh/rcar/ssi.c
sound/soc/soc-dapm.c
tools/lib/api/fs/debugfs.c
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/event-parse.h
tools/perf/Makefile.perf
tools/perf/arch/x86/tests/dwarf-unwind.c
tools/perf/arch/x86/tests/regs_load.S
tools/perf/config/Makefile
tools/perf/tests/make
tools/perf/util/machine.c
virt/kvm/arm/vgic.c
virt/kvm/assigned-dev.c
virt/kvm/async_pf.c

diff --git a/Documentation/devicetree/bindings/arm/arch_timer.txt b/Documentation/devicetree/bindings/arm/arch_timer.txt
index 06fc7602593a9d38a4cec97538948b03b5ce2676..37b2cafa4e52703b516d8c163653ae58106ad990 100644
@@ -19,6 +19,9 @@ to deliver its interrupts via SPIs.
 
 - clock-frequency : The frequency of the main counter, in Hz. Optional.
 
+- always-on : a boolean property. If present, the timer is powered through an
+  always-on power domain, therefore it never loses context.
+
 Example:
 
        timer {
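For reference, a consumer of this binding would typically test the new flag with of_property_read_bool(); a minimal sketch, not part of this merge, with illustrative names:

	/* requires <linux/of.h> */
	static bool timer_never_loses_context;

	static void example_parse_timer_node(struct device_node *np)
	{
		/* "always-on" is a plain boolean property: present means true */
		timer_never_loses_context = of_property_read_bool(np, "always-on");
	}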
diff --git a/Documentation/devicetree/bindings/ata/apm-xgene.txt b/Documentation/devicetree/bindings/ata/apm-xgene.txt
index 7bcfbf59810e5a5c105740c7dca63844d7791ef2..a668f0e7d0018b76841127db20845a7fd45affd6 100644
@@ -24,6 +24,7 @@ Required properties:
   * "sata-phy" for the SATA 6.0Gbps PHY
 
 Optional properties:
+- dma-coherent         : Present if dma operations are coherent
 - status               : Shall be "ok" if enabled or "disabled" if disabled.
                          Default is "ok".
 
@@ -55,6 +56,7 @@ Example:
                              <0x0 0x1f22e000 0x0 0x1000>,
                              <0x0 0x1f227000 0x0 0x1000>;
                        interrupts = <0x0 0x87 0x4>;
+                       dma-coherent;
                        status = "ok";
                        clocks = <&sataclk 0>;
                        phys = <&phy2 0>;
@@ -69,6 +71,7 @@ Example:
                              <0x0 0x1f23e000 0x0 0x1000>,
                              <0x0 0x1f237000 0x0 0x1000>;
                        interrupts = <0x0 0x88 0x4>;
+                       dma-coherent;
                        status = "ok";
                        clocks = <&sataclk 0>;
                        phys = <&phy3 0>;
diff --git a/Documentation/devicetree/bindings/net/socfpga-dwmac.txt b/Documentation/devicetree/bindings/net/socfpga-dwmac.txt
index 636f0ac4e22388b4c8934681f7a7fc3712d30a0f..2a60cd3e8d5ddb7bdf3b2caad2bc414a3d8566e0 100644
@@ -23,5 +23,5 @@ gmac0: ethernet@ff700000 {
        interrupt-names = "macirq";
        mac-address = [00 00 00 00 00 00];/* Filled in by U-Boot */
        clocks = <&emac_0_clk>;
-       clocks-names = "stmmaceth";
+       clock-names = "stmmaceth";
 };
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 80c1fb8bfbb8bd778a6682fa75d863ce51d3c0e4..a2acd2b26baf78c8aafc3948d7dc7cb012c09db5 100644
@@ -33,7 +33,7 @@ Optional properties:
 - max-frame-size: See ethernet.txt file in the same directory
 - clocks: If present, the first clock should be the GMAC main clock,
   further clocks may be specified in derived bindings.
-- clocks-names: One name for each entry in the clocks property, the
+- clock-names: One name for each entry in the clocks property, the
   first one should be "stmmaceth".
 
 Examples:
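The rename matters because drivers look the clock up by the name listed in clock-names; a misspelt property simply makes that lookup fail. A minimal sketch of such a consumer (illustrative only, not taken from the stmmac driver itself):

	/* requires <linux/clk.h> and <linux/err.h> */
	static int example_get_mac_clock(struct device *dev, struct clk **clk)
	{
		*clk = devm_clk_get(dev, "stmmaceth");	/* matches clock-names */
		return PTR_ERR_OR_ZERO(*clk);
	}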
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-st.txt
index 4bd5be0e5e7dd51eaf7cf23a92a2bf884dd264f1..26bcb18f4e609288d006eeae5bbf496730e2921f 100644
@@ -83,7 +83,7 @@ Example:
                reg             = <0xfe61f080 0x4>;
                reg-names       = "irqmux";
                interrupts      = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
-               interrupts-names = "irqmux";
+               interrupt-names = "irqmux";
                ranges          = <0 0xfe610000 0x5000>;
 
                PIO0: gpio@fe610000 {
@@ -165,7 +165,7 @@ sdhci0:sdhci@fe810000{
        interrupt-parent = <&PIO3>;
        #interrupt-cells = <2>;
        interrupts = <3 IRQ_TYPE_LEVEL_HIGH>; /* Interrupt line via PIO3-3 */
-       interrupts-names = "card-detect";
+       interrupt-names = "card-detect";
        pinctrl-names = "default";
        pinctrl-0       = <&pinctrl_mmc>;
 };
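The same reasoning applies to the interrupts-names -> interrupt-names fixes in this and the following hunks: named interrupts are only resolvable when the property carries its canonical name. A minimal sketch of a by-name lookup (illustrative only):

	/* requires <linux/platform_device.h> */
	static int example_get_irqmux_irq(struct platform_device *pdev)
	{
		/* fails if the node spells the property "interrupts-names" */
		return platform_get_irq_byname(pdev, "irqmux");
	}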
diff --git a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
index 569b26c4a81ee25e1f141329f90903dcb28ab4e4..60ca07996458576e2fcc6f85a334e13fcec5a2c7 100644
@@ -47,7 +47,7 @@ mcasp0: mcasp0@1d00000 {
        reg = <0x100000 0x3000>;
        reg-names "mpu";
        interrupts = <82>, <83>;
-       interrupts-names = "tx", "rx";
+       interrupt-names = "tx", "rx";
        op-mode = <0>;          /* MCASP_IIS_MODE */
        tdm-slots = <2>;
        serial-dir = <
diff --git a/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt b/Documentation/devicetree/bindings/sound/tlv320aic31xx.txt
index 74c66dee3e146445b5b1593670dc52473f527165..eff12be5e789cf91bb4a5d4a21bab7f7d1b7d32c 100644
@@ -13,6 +13,9 @@ Required properties:
     "ti,tlv320aic3111" - TLV320AIC3111 (stereo speaker amp, MiniDSP)
 
 - reg - <int> -  I2C slave address
+- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
+  DVDD-supply : power supplies for the device as covered in
+  Documentation/devicetree/bindings/regulator/regulator.txt
 
 
 Optional properties:
@@ -24,9 +27,6 @@ Optional properties:
         3 or MICBIAS_AVDD - MICBIAS output is connected to AVDD
        If this node is not mentioned or if the value is unknown, then
        micbias is set to 2.0V.
-- HPVDD-supply, SPRVDD-supply, SPLVDD-supply, AVDD-supply, IOVDD-supply,
-  DVDD-supply : power supplies for the device as covered in
-  Documentation/devicetree/bindings/regulator/regulator.txt
 
 CODEC output pins:
   * HPL
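The supply properties move from the optional to the required list; each *-supply entry is what the codec driver fetches through the regulator consumer API. A minimal sketch (illustrative only, error handling trimmed):

	/* requires <linux/regulator/consumer.h> and <linux/err.h> */
	static int example_get_codec_supply(struct device *dev)
	{
		struct regulator *dvdd = devm_regulator_get(dev, "DVDD");

		return PTR_ERR_OR_ZERO(dvdd);
	}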
diff --git a/MAINTAINERS b/MAINTAINERS
index e67ea244204163a5d0eb9e43239c5ccd4394bae1..181f43077a030b6a321cf62547ecea5ac9e3a3f2 100644
@@ -3485,6 +3485,12 @@ S:       Maintained
 F:     drivers/extcon/
 F:     Documentation/extcon/
 
+EXYNOS DP DRIVER
+M:     Jingoo Han <jg1.han@samsung.com>
+L:     dri-devel@lists.freedesktop.org
+S:     Maintained
+F:     drivers/gpu/drm/exynos/exynos_dp*
+
 EXYNOS MIPI DISPLAY DRIVERS
 M:     Inki Dae <inki.dae@samsung.com>
 M:     Donghwa Lee <dh09.lee@samsung.com>
@@ -3550,7 +3556,7 @@ F:        include/scsi/libfcoe.h
 F:     include/uapi/scsi/fc/
 
 FILE LOCKING (flock() and fcntl()/lockf())
-M:     Jeff Layton <jlayton@redhat.com>
+M:     Jeff Layton <jlayton@poochiereds.net>
 M:     J. Bruce Fields <bfields@fieldses.org>
 L:     linux-fsdevel@vger.kernel.org
 S:     Maintained
@@ -5108,14 +5114,19 @@ F:      drivers/s390/kvm/
 
 KERNEL VIRTUAL MACHINE (KVM) FOR ARM
 M:     Christoffer Dall <christoffer.dall@linaro.org>
+M:     Marc Zyngier <marc.zyngier@arm.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kvmarm@lists.cs.columbia.edu
 W:     http://systems.cs.columbia.edu/projects/kvm-arm
 S:     Supported
 F:     arch/arm/include/uapi/asm/kvm*
 F:     arch/arm/include/asm/kvm*
 F:     arch/arm/kvm/
+F:     virt/kvm/arm/
+F:     include/kvm/arm_*
 
 KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
+M:     Christoffer Dall <christoffer.dall@linaro.org>
 M:     Marc Zyngier <marc.zyngier@arm.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     kvmarm@lists.cs.columbia.edu
@@ -7293,7 +7304,7 @@ F:        Documentation/blockdev/ramdisk.txt
 F:     drivers/block/brd.c
 
 RANDOM NUMBER DRIVER
-M:     Theodore Ts'o" <tytso@mit.edu>
+M:     "Theodore Ts'o" <tytso@mit.edu>
 S:     Maintained
 F:     drivers/char/random.c
 
diff --git a/Makefile b/Makefile
index 041c685e11ea0d24a2946c4384a06e22a8de93d2..28a7259e0f3b53ed0ee9242fcea45abc71c004f1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 15
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S
index 819dd5f7eb055ec87f082188e3d7ebb4a8b683aa..29b82adbf0b401685b269f81a22d29ffd4fd97a5 100644
@@ -614,11 +614,13 @@ resume_user_mode_begin:
 
 resume_kernel_mode:
 
-#ifdef CONFIG_PREEMPT
-
-       ; This is a must for preempt_schedule_irq()
+       ; Disable Interrupts from this point on
+       ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq()
+       ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe
        IRQ_DISABLE     r9
 
+#ifdef CONFIG_PREEMPT
+
        ; Can't preempt if preemption disabled
        GET_CURR_THR_INFO_FROM_SP   r10
        ld  r8, [r10, THREAD_INFO_PREEMPT_COUNT]
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 07f283c20eb12260f362753e544ddd2e6584b75d..cb6811e5ae5a9a148321d4e667dc34cde9bc9846 100644
                              <0x46000000 0x400000>;
                        reg-names = "mpu", "dat";
                        interrupts = <80>, <81>;
-                       interrupts-names = "tx", "rx";
+                       interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 8>,
                                <&edma 9>;
                              <0x46400000 0x400000>;
                        reg-names = "mpu", "dat";
                        interrupts = <82>, <83>;
-                       interrupts-names = "tx", "rx";
+                       interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 10>,
                                <&edma 11>;
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 36d523a268314d3e1948dd894ae6b07141ac946e..d1f8707ff1dfc46d65815ef5ae2c36dbc1c46006 100644
                              <0x46000000 0x400000>;
                        reg-names = "mpu", "dat";
                        interrupts = <80>, <81>;
-                       interrupts-names = "tx", "rx";
+                       interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 8>,
                               <&edma 9>;
                              <0x46400000 0x400000>;
                        reg-names = "mpu", "dat";
                        interrupts = <82>, <83>;
-                       interrupts-names = "tx", "rx";
+                       interrupt-names = "tx", "rx";
                        status = "disabled";
                        dmas = <&edma 10>,
                               <&edma 11>;
diff --git a/arch/arm/boot/dts/stih415-pinctrl.dtsi b/arch/arm/boot/dts/stih415-pinctrl.dtsi
index f09fb10a3791a7e4fc238f4e47548fc7f86da705..81df870e5ee6791530b3902aab65ea6576bf47f8 100644
@@ -49,7 +49,7 @@
                        reg             = <0xfe61f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfe610000 0x5000>;
 
                        PIO0: gpio@fe610000 {
                        reg             = <0xfee0f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 181 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfee00000 0x8000>;
 
                        PIO5: gpio@fee00000 {
                        reg             = <0xfe82f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfe820000 0x8000>;
 
                        PIO13: gpio@fe820000 {
                        reg             = <0xfd6bf080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfd6b0000 0x3000>;
 
                        PIO100: gpio@fd6b0000 {
                        reg             = <0xfd33f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfd330000 0x5000>;
 
                        PIO103: gpio@fd330000 {
diff --git a/arch/arm/boot/dts/stih416-pinctrl.dtsi b/arch/arm/boot/dts/stih416-pinctrl.dtsi
index aeea304086eb3b57c5682539643f0d6ca4540c0d..250d5ecc951ea0e3e5c7f071fb4e38b6312840d7 100644
@@ -53,7 +53,7 @@
                        reg             = <0xfe61f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfe610000 0x6000>;
 
                        PIO0: gpio@fe610000 {
                        reg             = <0xfee0f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfee00000 0x10000>;
 
                        PIO5: gpio@fee00000 {
                        reg             = <0xfe82f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfe820000 0x6000>;
 
                        PIO13: gpio@fe820000 {
                        reg             = <0xfd6bf080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges          = <0 0xfd6b0000 0x3000>;
 
                        PIO100: gpio@fd6b0000 {
                        reg             = <0xfd33f080 0x4>;
                        reg-names       = "irqmux";
                        interrupts      = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
-                       interrupts-names = "irqmux";
+                       interrupt-names = "irqmux";
                        ranges                  = <0 0xfd330000 0x5000>;
 
                        PIO103: gpio@fd330000 {
diff --git a/arch/arm/kvm/Kconfig b/arch/arm/kvm/Kconfig
index 466bd299b1a8aad54949364d976d9c5430c2375e..4be5bb150bdddea694fbf71bffa6dd8e9b855177 100644
@@ -23,7 +23,7 @@ config KVM
        select HAVE_KVM_CPU_RELAX_INTERCEPT
        select KVM_MMIO
        select KVM_ARM_HOST
-       depends on ARM_VIRT_EXT && ARM_LPAE
+       depends on ARM_VIRT_EXT && ARM_LPAE && !CPU_BIG_ENDIAN
        ---help---
          Support hosting virtualized guest machines. You will also
          need to select one or more of the processor modules below.
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 80bb1e6c2c2906d0764ae5b696e72053c8faff9c..16f804938b8fea9fee56fa93991cf8c45cf141e5 100644
@@ -42,6 +42,8 @@ static unsigned long hyp_idmap_start;
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
 
+#define pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
+
 #define kvm_pmd_huge(_x)       (pmd_huge(_x) || pmd_trans_huge(_x))
 
 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
@@ -293,14 +295,14 @@ void free_boot_hyp_pgd(void)
        if (boot_hyp_pgd) {
                unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
                unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
-               kfree(boot_hyp_pgd);
+               free_pages((unsigned long)boot_hyp_pgd, pgd_order);
                boot_hyp_pgd = NULL;
        }
 
        if (hyp_pgd)
                unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
 
-       kfree(init_bounce_page);
+       free_page((unsigned long)init_bounce_page);
        init_bounce_page = NULL;
 
        mutex_unlock(&kvm_hyp_pgd_mutex);
@@ -330,7 +332,7 @@ void free_hyp_pgds(void)
                for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
                        unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
 
-               kfree(hyp_pgd);
+               free_pages((unsigned long)hyp_pgd, pgd_order);
                hyp_pgd = NULL;
        }
 
@@ -1024,7 +1026,7 @@ int kvm_mmu_init(void)
                size_t len = __hyp_idmap_text_end - __hyp_idmap_text_start;
                phys_addr_t phys_base;
 
-               init_bounce_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+               init_bounce_page = (void *)__get_free_page(GFP_KERNEL);
                if (!init_bounce_page) {
                        kvm_err("Couldn't allocate HYP init bounce page\n");
                        err = -ENOMEM;
@@ -1050,8 +1052,9 @@ int kvm_mmu_init(void)
                         (unsigned long)phys_base);
        }
 
-       hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
-       boot_hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL);
+       hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+       boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, pgd_order);
+
        if (!hyp_pgd || !boot_hyp_pgd) {
                kvm_err("Hyp mode PGD not allocated\n");
                err = -ENOMEM;
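The allocation changes above swap kmalloc/kzalloc for whole-page allocations: __get_free_pages() returns page-aligned memory, and get_order() rounds the PGD size up to the matching power-of-two number of pages, alignment that kmalloc does not guarantee for the HYP page tables or the bounce page. A minimal sketch of the same pattern in isolation (illustrative only):

	/* requires <linux/gfp.h> and <asm/page.h> */
	static unsigned long example_alloc_aligned(size_t size)
	{
		unsigned int order = get_order(size);	/* pages, rounded up */

		return __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	}

	static void example_free_aligned(unsigned long addr, size_t size)
	{
		free_pages(addr, get_order(size));
	}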
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index 93f4b2dd92484863e8015da4a622a0c17745de5a..f8c40a66e65ddb3d3a4c327379d0eec2b234ce50 100644
                              <0x0 0x1f21e000 0x0 0x1000>,
                              <0x0 0x1f217000 0x0 0x1000>;
                        interrupts = <0x0 0x86 0x4>;
+                       dma-coherent;
                        status = "disabled";
                        clocks = <&sata01clk 0>;
                        phys = <&phy1 0>;
                              <0x0 0x1f22e000 0x0 0x1000>,
                              <0x0 0x1f227000 0x0 0x1000>;
                        interrupts = <0x0 0x87 0x4>;
+                       dma-coherent;
                        status = "ok";
                        clocks = <&sata23clk 0>;
                        phys = <&phy2 0>;
                              <0x0 0x1f23d000 0x0 0x1000>,
                              <0x0 0x1f23e000 0x0 0x1000>;
                        interrupts = <0x0 0x88 0x4>;
+                       dma-coherent;
                        status = "ok";
                        clocks = <&sata45clk 0>;
                        phys = <&phy3 0>;
diff --git a/arch/arm64/kernel/early_printk.c b/arch/arm64/kernel/early_printk.c
index ffbbdde7aba10480c12b41d552d1fb41da6097df..2dc36d00addffad4a4bd10ef0a6b1bac21170a49 100644
@@ -143,10 +143,8 @@ static int __init setup_early_printk(char *buf)
        }
        /* no options parsing yet */
 
-       if (paddr) {
-               set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr);
-               early_base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE);
-       }
+       if (paddr)
+               early_base = (void __iomem *)set_fixmap_offset_io(FIX_EARLYCON_MEM_BASE, paddr);
 
        printch = match->printch;
        early_console = &early_console_dev;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 93e7df8968fe123d40ce7ad2a017ac88aef7e118..7ec784653b29fad2b1eba1c49a21e9e499a6c167 100644
@@ -396,7 +396,7 @@ static int __init arm64_device_init(void)
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
        return 0;
 }
-arch_initcall(arm64_device_init);
+arch_initcall_sync(arm64_device_init);
 
 static DEFINE_PER_CPU(struct cpu, cpu_data);
 
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 0ba347e59f06a7dbfe3fe7dcc884f9435c791d6e..c851eb44dc505f8b250b7e1205b1a5ccb35afc8c 100644
 #include <linux/slab.h>
 #include <linux/dma-mapping.h>
 #include <linux/dma-contiguous.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
 #include <linux/vmalloc.h>
 #include <linux/swiotlb.h>
+#include <linux/amba/bus.h>
 
 #include <asm/cacheflush.h>
 
@@ -305,17 +308,45 @@ struct dma_map_ops coherent_swiotlb_dma_ops = {
 };
 EXPORT_SYMBOL(coherent_swiotlb_dma_ops);
 
+static int dma_bus_notifier(struct notifier_block *nb,
+                           unsigned long event, void *_dev)
+{
+       struct device *dev = _dev;
+
+       if (event != BUS_NOTIFY_ADD_DEVICE)
+               return NOTIFY_DONE;
+
+       if (of_property_read_bool(dev->of_node, "dma-coherent"))
+               set_dma_ops(dev, &coherent_swiotlb_dma_ops);
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block platform_bus_nb = {
+       .notifier_call = dma_bus_notifier,
+};
+
+static struct notifier_block amba_bus_nb = {
+       .notifier_call = dma_bus_notifier,
+};
+
 extern int swiotlb_late_init_with_default_size(size_t default_size);
 
 static int __init swiotlb_late_init(void)
 {
        size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);
 
-       dma_ops = &coherent_swiotlb_dma_ops;
+       /*
+        * These must be registered before of_platform_populate().
+        */
+       bus_register_notifier(&platform_bus_type, &platform_bus_nb);
+       bus_register_notifier(&amba_bustype, &amba_bus_nb);
+
+       dma_ops = &noncoherent_swiotlb_dma_ops;
 
        return swiotlb_late_init_with_default_size(swiotlb_size);
 }
-subsys_initcall(swiotlb_late_init);
+arch_initcall(swiotlb_late_init);
 
 #define PREALLOC_DMA_DEBUG_ENTRIES     4096
 
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6b7e89569a3a9ff8518e7c6ee856603f1e9fb93b..0a472c41a67fa9dc33e9c746c24a402d6f306289 100644
@@ -374,6 +374,9 @@ int kern_addr_valid(unsigned long addr)
        if (pmd_none(*pmd))
                return 0;
 
+       if (pmd_sect(*pmd))
+               return pfn_valid(pmd_pfn(*pmd));
+
        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;
diff --git a/arch/hexagon/include/asm/barrier.h b/arch/hexagon/include/asm/barrier.h
deleted file mode 100644
index 4e863da..0000000
--- a/arch/hexagon/include/asm/barrier.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Memory barrier definitions for the Hexagon architecture
- *
- * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
- * 02110-1301, USA.
- */
-
-#ifndef _ASM_BARRIER_H
-#define _ASM_BARRIER_H
-
-#define rmb()                          barrier()
-#define read_barrier_depends()         barrier()
-#define wmb()                          barrier()
-#define mb()                           barrier()
-#define smp_rmb()                      barrier()
-#define smp_read_barrier_depends()     barrier()
-#define smp_wmb()                      barrier()
-#define smp_mb()                       barrier()
-
-/*  Set a value and use a memory barrier.  Used by the scheduler somewhere.  */
-#define set_mb(var, value) \
-       do { var = value; mb(); } while (0)
-
-#endif /* _ASM_BARRIER_H */
diff --git a/arch/parisc/include/uapi/asm/Kbuild b/arch/parisc/include/uapi/asm/Kbuild
index a580642555b6f0e7f087ade117ce062cb303d429..348356c99514f0cdfb8876b9f22c0464ab8e3734 100644
@@ -1,6 +1,8 @@
 # UAPI Header export list
 include include/uapi/asm-generic/Kbuild.asm
 
+generic-y += resource.h
+
 header-y += bitsperlong.h
 header-y += byteorder.h
 header-y += errno.h
@@ -13,7 +15,6 @@ header-y += msgbuf.h
 header-y += pdc.h
 header-y += posix_types.h
 header-y += ptrace.h
-header-y += resource.h
 header-y += sembuf.h
 header-y += setup.h
 header-y += shmbuf.h
diff --git a/arch/parisc/include/uapi/asm/resource.h b/arch/parisc/include/uapi/asm/resource.h
deleted file mode 100644
index 8b06343..0000000
--- a/arch/parisc/include/uapi/asm/resource.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef _ASM_PARISC_RESOURCE_H
-#define _ASM_PARISC_RESOURCE_H
-
-#define _STK_LIM_MAX   10 * _STK_LIM
-#include <asm-generic/resource.h>
-
-#endif
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index a28f02165e97032c8eda569e97b06a4dc81fb0f9..d367a0aece2aac8b067a6c7bf51c43ef3d488eda 100644
@@ -139,18 +139,18 @@ static struct addr_range prep_initrd(struct addr_range vmlinux, void *chosen,
  * edit the command line passed to vmlinux (by setting /chosen/bootargs).
  * The buffer is put in it's own section so that tools may locate it easier.
  */
-static char cmdline[COMMAND_LINE_SIZE]
+static char cmdline[BOOT_COMMAND_LINE_SIZE]
        __attribute__((__section__("__builtin_cmdline")));
 
 static void prep_cmdline(void *chosen)
 {
        if (cmdline[0] == '\0')
-               getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
+               getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
 
        printf("\n\rLinux/PowerPC load: %s", cmdline);
        /* If possible, edit the command line */
        if (console_ops.edit_cmdline)
-               console_ops.edit_cmdline(cmdline, COMMAND_LINE_SIZE);
+               console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE);
        printf("\n\r");
 
        /* Put the command line back into the devtree for the kernel */
@@ -174,7 +174,7 @@ void start(void)
         * built-in command line wasn't set by an external tool */
        if ((loader_info.cmdline_len > 0) && (cmdline[0] == '\0'))
                memmove(cmdline, loader_info.cmdline,
-                       min(loader_info.cmdline_len, COMMAND_LINE_SIZE-1));
+                       min(loader_info.cmdline_len, BOOT_COMMAND_LINE_SIZE-1));
 
        if (console_ops.open && (console_ops.open() < 0))
                exit();
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index b3218ce451bb9be8081dd77cc75f0a3114468be1..8aad3c55aeda2885e331b8c7ebd52436c219e74b 100644
@@ -15,7 +15,7 @@
 #include "types.h"
 #include "string.h"
 
-#define        COMMAND_LINE_SIZE       512
+#define        BOOT_COMMAND_LINE_SIZE  2048
 #define        MAX_PATH_LEN            256
 #define        MAX_PROP_LEN            256 /* What should this be? */
 
diff --git a/arch/powerpc/boot/ps3.c b/arch/powerpc/boot/ps3.c
index 9954d98871d061dfc9abb8c33fefcbbe00d8d2ea..4ec2d86d3c50571a2a62f27c31f00739595ed219 100644
@@ -47,13 +47,13 @@ BSS_STACK(4096);
  * The buffer is put in it's own section so that tools may locate it easier.
  */
 
-static char cmdline[COMMAND_LINE_SIZE]
+static char cmdline[BOOT_COMMAND_LINE_SIZE]
        __attribute__((__section__("__builtin_cmdline")));
 
 static void prep_cmdline(void *chosen)
 {
        if (cmdline[0] == '\0')
-               getprop(chosen, "bootargs", cmdline, COMMAND_LINE_SIZE-1);
+               getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
        else
                setprop_str(chosen, "bootargs", cmdline);
 
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index a2efdaa020b0f30b11a08b1352b1f9d7388ad3af..66ad7a74116f15dd803ef7e887b7988ac5efa61a 100644
@@ -41,14 +41,14 @@ struct opal_takeover_args {
  * size except the last one in the list to be as well.
  */
 struct opal_sg_entry {
-       void    *data;
-       long    length;
+       __be64 data;
+       __be64 length;
 };
 
-/* sg list */
+/* SG list */
 struct opal_sg_list {
-       unsigned long num_entries;
-       struct opal_sg_list *next;
+       __be64 length;
+       __be64 next;
        struct opal_sg_entry entry[];
 };
 
@@ -858,8 +858,8 @@ int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
                      uint32_t addr, __be32 *data, uint32_t sz);
 
-int64_t opal_read_elog(uint64_t buffer, size_t size, uint64_t log_id);
-int64_t opal_get_elog_size(uint64_t *log_id, size_t *size, uint64_t *elog_type);
+int64_t opal_read_elog(uint64_t buffer, uint64_t size, uint64_t log_id);
+int64_t opal_get_elog_size(__be64 *log_id, __be64 *size, __be64 *elog_type);
 int64_t opal_write_elog(uint64_t buffer, uint64_t size, uint64_t offset);
 int64_t opal_send_ack_elog(uint64_t log_id);
 void opal_resend_pending_logs(void);
@@ -868,23 +868,24 @@ int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
 int64_t opal_manage_flash(uint8_t op);
 int64_t opal_update_flash(uint64_t blk_list);
 int64_t opal_dump_init(uint8_t dump_type);
-int64_t opal_dump_info(uint32_t *dump_id, uint32_t *dump_size);
-int64_t opal_dump_info2(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type);
+int64_t opal_dump_info(__be32 *dump_id, __be32 *dump_size);
+int64_t opal_dump_info2(__be32 *dump_id, __be32 *dump_size, __be32 *dump_type);
 int64_t opal_dump_read(uint32_t dump_id, uint64_t buffer);
 int64_t opal_dump_ack(uint32_t dump_id);
 int64_t opal_dump_resend_notification(void);
 
-int64_t opal_get_msg(uint64_t buffer, size_t size);
-int64_t opal_check_completion(uint64_t buffer, size_t size, uint64_t token);
+int64_t opal_get_msg(uint64_t buffer, uint64_t size);
+int64_t opal_check_completion(uint64_t buffer, uint64_t size, uint64_t token);
 int64_t opal_sync_host_reboot(void);
 int64_t opal_get_param(uint64_t token, uint32_t param_id, uint64_t buffer,
-               size_t length);
+               uint64_t length);
 int64_t opal_set_param(uint64_t token, uint32_t param_id, uint64_t buffer,
-               size_t length);
+               uint64_t length);
 int64_t opal_sensor_read(uint32_t sensor_hndl, int token, __be32 *sensor_data);
 
 /* Internal functions */
-extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
+extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
+                                  int depth, void *data);
 extern int early_init_dt_scan_recoverable_ranges(unsigned long node,
                                 const char *uname, int depth, void *data);
 
@@ -893,10 +894,6 @@ extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
 
 extern void hvc_opal_init_early(void);
 
-/* Internal functions */
-extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
-                                  int depth, void *data);
-
 extern int opal_notifier_register(struct notifier_block *nb);
 extern int opal_notifier_unregister(struct notifier_block *nb);
 
@@ -906,9 +903,6 @@ extern void opal_notifier_enable(void);
 extern void opal_notifier_disable(void);
 extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
 
-extern int opal_get_chars(uint32_t vtermno, char *buf, int count);
-extern int opal_put_chars(uint32_t vtermno, const char *buf, int total_len);
-
 extern int __opal_async_get_token(void);
 extern int opal_async_get_token_interruptible(void);
 extern int __opal_async_release_token(int token);
@@ -916,8 +910,6 @@ extern int opal_async_release_token(int token);
 extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg);
 extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
 
-extern void hvc_opal_init_early(void);
-
 struct rtc_time;
 extern int opal_set_rtc_time(struct rtc_time *tm);
 extern void opal_get_rtc_time(struct rtc_time *tm);
@@ -937,6 +929,10 @@ extern int opal_resync_timebase(void);
 
 extern void opal_lpc_init(void);
 
+struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+                                            unsigned long vmalloc_size);
+void opal_free_sg_list(struct opal_sg_list *sg);
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __OPAL_H */
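With the scatter-gather types now declared as explicit big-endian fields, anything building a list for firmware has to byte-swap, and the new opal_vmalloc_to_sg_list()/opal_free_sg_list() helpers (used by the opal-dump and opal-flash hunks below) centralise that. A minimal sketch of filling one entry (illustrative only; the real helper lives in opal.c, whose diff is not shown here in full):

	/* requires <asm/byteorder.h> */
	static void example_fill_sg_entry(struct opal_sg_entry *ent,
					  unsigned long phys_addr,
					  unsigned long len)
	{
		ent->data = cpu_to_be64(phys_addr);	/* __be64, per the new layout */
		ent->length = cpu_to_be64(len);
	}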
diff --git a/arch/powerpc/include/uapi/asm/setup.h b/arch/powerpc/include/uapi/asm/setup.h
index 552df83f1a49627ddd1d49e1ba982c743a5bfefe..ae3fb68cb28e8df5cb53f078ccc5bcc923c8231c 100644
@@ -1 +1,6 @@
-#include <asm-generic/setup.h>
+#ifndef _UAPI_ASM_POWERPC_SETUP_H
+#define _UAPI_ASM_POWERPC_SETUP_H
+
+#define COMMAND_LINE_SIZE      2048
+
+#endif /* _UAPI_ASM_POWERPC_SETUP_H */
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 3bd77edd7610ce20267a880069972624eafed62e..450850a49dced7919c3c2d349c2d70aae7cea0ad 100644
@@ -120,6 +120,7 @@ EXPORT_SYMBOL(giveup_spe);
 EXPORT_SYMBOL(flush_instruction_cache);
 #endif
 EXPORT_SYMBOL(flush_dcache_range);
+EXPORT_SYMBOL(flush_icache_range);
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_PPC32
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c
index 2f3cdb01506de3d7791712ecd6ffeaf1fcd36352..658e89d2025b0b2dd65bb812d2c89d65867c1015 100644
@@ -705,7 +705,7 @@ static int __init rtas_flash_init(void)
        if (rtas_token("ibm,update-flash-64-and-reboot") ==
                       RTAS_UNKNOWN_SERVICE) {
                pr_info("rtas_flash: no firmware flash support\n");
-               return 1;
+               return -EINVAL;
        }
 
        rtas_validate_flash_data.buf = kzalloc(VALIDATE_BUF_SIZE, GFP_KERNEL);
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index ffbb871c2bd803827fa5a78658f29d2fa8a1dbd6..b031f932c0cc3dcc0c452c88f8ba2c3c88cf641d 100644
@@ -242,6 +242,12 @@ kvm_novcpu_exit:
  */
        .globl  kvm_start_guest
 kvm_start_guest:
+
+       /* Set runlatch bit the minute you wake up from nap */
+       mfspr   r1, SPRN_CTRLF
+       ori     r1, r1, 1
+       mtspr   SPRN_CTRLT, r1
+
        ld      r2,PACATOC(r13)
 
        li      r0,KVM_HWTHREAD_IN_KVM
@@ -309,6 +315,11 @@ kvm_no_guest:
        li      r0, KVM_HWTHREAD_IN_NAP
        stb     r0, HSTATE_HWTHREAD_STATE(r13)
 kvm_do_nap:
+       /* Clear the runlatch bit before napping */
+       mfspr   r2, SPRN_CTRLF
+       clrrdi  r2, r2, 1
+       mtspr   SPRN_CTRLT, r2
+
        li      r3, LPCR_PECE0
        mfspr   r4, SPRN_LPCR
        rlwimi  r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
@@ -1999,8 +2010,13 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
 
        /*
         * Take a nap until a decrementer or external or doobell interrupt
-        * occurs, with PECE1, PECE0 and PECEDP set in LPCR
+        * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
+        * runlatch bit before napping.
         */
+       mfspr   r2, SPRN_CTRLF
+       clrrdi  r2, r2, 1
+       mtspr   SPRN_CTRLT, r2
+
        li      r0,1
        stb     r0,HSTATE_HWTHREAD_REQ(r13)
        mfspr   r5,SPRN_LPCR
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 3ea26c25590be1dabe4a057882f35b77f5dfe7c1..cf1d325eae8be814953650cf6b94fd349c0fdd12 100644
@@ -82,17 +82,14 @@ static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
-               /* Add AVAL part */
-               if (psize != apsize) {
-                       /*
-                        * MPSS, 64K base page size and 16MB parge page size
-                        * We don't need all the bits, but rest of the bits
-                        * must be ignored by the processor.
-                        * vpn cover upto 65 bits of va. (0...65) and we need
-                        * 58..64 bits of va.
-                        */
-                       va |= (vpn & 0xfe);
-               }
+               /*
+                * AVAL bits:
+                * We don't need all the bits, but rest of the bits
+                * must be ignored by the processor.
+                * vpn cover upto 65 bits of va. (0...65) and we need
+                * 58..64 bits of va.
+                */
+               va |= (vpn & 0xfe); /* AVAL */
                va |= 1; /* L */
                asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
                             : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
@@ -133,17 +130,14 @@ static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
                va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
                va |= penc << 12;
                va |= ssize << 8;
-               /* Add AVAL part */
-               if (psize != apsize) {
-                       /*
-                        * MPSS, 64K base page size and 16MB parge page size
-                        * We don't need all the bits, but rest of the bits
-                        * must be ignored by the processor.
-                        * vpn cover upto 65 bits of va. (0...65) and we need
-                        * 58..64 bits of va.
-                        */
-                       va |= (vpn & 0xfe);
-               }
+               /*
+                * AVAL bits:
+                * We don't need all the bits, but rest of the bits
+                * must be ignored by the processor.
+                * vpn cover upto 65 bits of va. (0...65) and we need
+                * 58..64 bits of va.
+                */
+               va |= (vpn & 0xfe);
                va |= 1; /* L */
                asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
                             : : "r"(va) : "memory");
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 297c9105141365e81316a888176fe80611b5b08a..e0766b82e1656721ff9e93586b47552414936973 100644
@@ -155,16 +155,28 @@ static ssize_t read_offset_data(void *dest, size_t dest_len,
        return copy_len;
 }
 
-static unsigned long h_get_24x7_catalog_page(char page[static 4096],
-                                            u32 version, u32 index)
+static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
+                                             unsigned long version,
+                                             unsigned long index)
 {
-       WARN_ON(!IS_ALIGNED((unsigned long)page, 4096));
+       pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
+                       phys_4096,
+                       version,
+                       index);
+       WARN_ON(!IS_ALIGNED(phys_4096, 4096));
        return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
-                       virt_to_phys(page),
+                       phys_4096,
                        version,
                        index);
 }
 
+static unsigned long h_get_24x7_catalog_page(char page[],
+                                            u64 version, u32 index)
+{
+       return h_get_24x7_catalog_page_(virt_to_phys(page),
+                                       version, index);
+}
+
 static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
                            struct bin_attribute *bin_attr, char *buf,
                            loff_t offset, size_t count)
@@ -173,7 +185,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
        ssize_t ret = 0;
        size_t catalog_len = 0, catalog_page_len = 0, page_count = 0;
        loff_t page_offset = 0;
-       uint32_t catalog_version_num = 0;
+       uint64_t catalog_version_num = 0;
        void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
        struct hv_24x7_catalog_page_0 *page_0 = page;
        if (!page)
@@ -185,7 +197,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
                goto e_free;
        }
 
-       catalog_version_num = be32_to_cpu(page_0->version);
+       catalog_version_num = be64_to_cpu(page_0->version);
        catalog_page_len = be32_to_cpu(page_0->length);
        catalog_len = catalog_page_len * 4096;
 
@@ -208,8 +220,9 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
                                page, 4096, page_offset * 4096);
 e_free:
        if (hret)
-               pr_err("h_get_24x7_catalog_page(ver=%d, page=%lld) failed: rc=%ld\n",
-                               catalog_version_num, page_offset, hret);
+               pr_err("h_get_24x7_catalog_page(ver=%lld, page=%lld) failed:"
+                      " rc=%ld\n",
+                      catalog_version_num, page_offset, hret);
        kfree(page);
 
        pr_devel("catalog_read: offset=%lld(%lld) count=%zu(%zu) catalog_len=%zu(%zu) => %zd\n",
@@ -243,7 +256,7 @@ e_free:                                                             \
 static DEVICE_ATTR_RO(_name)
 
 PAGE_0_ATTR(catalog_version, "%lld\n",
-               (unsigned long long)be32_to_cpu(page_0->version));
+               (unsigned long long)be64_to_cpu(page_0->version));
 PAGE_0_ATTR(catalog_len, "%lld\n",
                (unsigned long long)be32_to_cpu(page_0->length) * 4096);
 static BIN_ATTR_RO(catalog, 0/* real length varies */);
@@ -485,13 +498,13 @@ static int hv_24x7_init(void)
        struct hv_perf_caps caps;
 
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
-               pr_info("not a virtualized system, not enabling\n");
+               pr_debug("not a virtualized system, not enabling\n");
                return -ENODEV;
        }
 
        hret = hv_perf_caps_get(&caps);
        if (hret) {
-               pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
+               pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
                                hret);
                return -ENODEV;
        }
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index 278ba7b9c2b525287f930445e71e04bd538e3236..c9d399a2df82e6727fa78b4de69fb867cc85552f 100644
@@ -78,7 +78,7 @@ static ssize_t kernel_version_show(struct device *dev,
        return sprintf(page, "0x%x\n", COUNTER_INFO_VERSION_CURRENT);
 }
 
-DEVICE_ATTR_RO(kernel_version);
+static DEVICE_ATTR_RO(kernel_version);
 HV_CAPS_ATTR(version, "0x%x\n");
 HV_CAPS_ATTR(ga, "%d\n");
 HV_CAPS_ATTR(expanded, "%d\n");
@@ -273,13 +273,13 @@ static int hv_gpci_init(void)
        struct hv_perf_caps caps;
 
        if (!firmware_has_feature(FW_FEATURE_LPAR)) {
-               pr_info("not a virtualized system, not enabling\n");
+               pr_debug("not a virtualized system, not enabling\n");
                return -ENODEV;
        }
 
        hret = hv_perf_caps_get(&caps);
        if (hret) {
-               pr_info("could not obtain capabilities, error 0x%80lx, not enabling\n",
+               pr_debug("could not obtain capabilities, not enabling, rc=%ld\n",
                                hret);
                return -ENODEV;
        }
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index b9827b0d87e4cd69cdff9f51216ea9429c79fb41..788a1977b9a5203cc9a477be6f9a2a7b71cdd754 100644
@@ -209,89 +209,20 @@ static struct kobj_type dump_ktype = {
        .default_attrs = dump_default_attrs,
 };
 
-static void free_dump_sg_list(struct opal_sg_list *list)
-{
-       struct opal_sg_list *sg1;
-       while (list) {
-               sg1 = list->next;
-               kfree(list);
-               list = sg1;
-       }
-       list = NULL;
-}
-
-static struct opal_sg_list *dump_data_to_sglist(struct dump_obj *dump)
-{
-       struct opal_sg_list *sg1, *list = NULL;
-       void *addr;
-       int64_t size;
-
-       addr = dump->buffer;
-       size = dump->size;
-
-       sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!sg1)
-               goto nomem;
-
-       list = sg1;
-       sg1->num_entries = 0;
-       while (size > 0) {
-               /* Translate virtual address to physical address */
-               sg1->entry[sg1->num_entries].data =
-                       (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
-
-               if (size > PAGE_SIZE)
-                       sg1->entry[sg1->num_entries].length = PAGE_SIZE;
-               else
-                       sg1->entry[sg1->num_entries].length = size;
-
-               sg1->num_entries++;
-               if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
-                       sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
-                       if (!sg1->next)
-                               goto nomem;
-
-                       sg1 = sg1->next;
-                       sg1->num_entries = 0;
-               }
-               addr += PAGE_SIZE;
-               size -= PAGE_SIZE;
-       }
-       return list;
-
-nomem:
-       pr_err("%s : Failed to allocate memory\n", __func__);
-       free_dump_sg_list(list);
-       return NULL;
-}
-
-static void sglist_to_phy_addr(struct opal_sg_list *list)
-{
-       struct opal_sg_list *sg, *next;
-
-       for (sg = list; sg; sg = next) {
-               next = sg->next;
-               /* Don't translate NULL pointer for last entry */
-               if (sg->next)
-                       sg->next = (struct opal_sg_list *)__pa(sg->next);
-               else
-                       sg->next = NULL;
-
-               /* Convert num_entries to length */
-               sg->num_entries =
-                       sg->num_entries * sizeof(struct opal_sg_entry) + 16;
-       }
-}
-
-static int64_t dump_read_info(uint32_t *id, uint32_t *size, uint32_t *type)
+static int64_t dump_read_info(uint32_t *dump_id, uint32_t *dump_size, uint32_t *dump_type)
 {
+       __be32 id, size, type;
        int rc;
-       *type = 0xffffffff;
 
-       rc = opal_dump_info2(id, size, type);
+       type = cpu_to_be32(0xffffffff);
 
+       rc = opal_dump_info2(&id, &size, &type);
        if (rc == OPAL_PARAMETER)
-               rc = opal_dump_info(id, size);
+               rc = opal_dump_info(&id, &size);
+
+       *dump_id = be32_to_cpu(id);
+       *dump_size = be32_to_cpu(size);
+       *dump_type = be32_to_cpu(type);
 
        if (rc)
                pr_warn("%s: Failed to get dump info (%d)\n",
@@ -314,15 +245,12 @@ static int64_t dump_read_data(struct dump_obj *dump)
        }
 
        /* Generate SG list */
-       list = dump_data_to_sglist(dump);
+       list = opal_vmalloc_to_sg_list(dump->buffer, dump->size);
        if (!list) {
                rc = -ENOMEM;
                goto out;
        }
 
-       /* Translate sg list addr to real address */
-       sglist_to_phy_addr(list);
-
        /* First entry address */
        addr = __pa(list);
 
@@ -341,7 +269,7 @@ static int64_t dump_read_data(struct dump_obj *dump)
                        __func__, dump->id);
 
        /* Free SG list */
-       free_dump_sg_list(list);
+       opal_free_sg_list(list);
 
 out:
        return rc;
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index ef7bc2a978627422d659d783ea21f0869975df53..10268c41d8302dd39ed73f4e0ad98dd6deb5e25f 100644
@@ -238,18 +238,25 @@ static struct elog_obj *create_elog_obj(uint64_t id, size_t size, uint64_t type)
 
 static void elog_work_fn(struct work_struct *work)
 {
-       size_t elog_size;
+       __be64 size;
+       __be64 id;
+       __be64 type;
+       uint64_t elog_size;
        uint64_t log_id;
        uint64_t elog_type;
        int rc;
        char name[2+16+1];
 
-       rc = opal_get_elog_size(&log_id, &elog_size, &elog_type);
+       rc = opal_get_elog_size(&id, &size, &type);
        if (rc != OPAL_SUCCESS) {
                pr_err("ELOG: Opal log read failed\n");
                return;
        }
 
+       elog_size = be64_to_cpu(size);
+       log_id = be64_to_cpu(id);
+       elog_type = be64_to_cpu(type);
+
        BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
 
        if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c
index 714ef972406bcacf66a4a896283c6fb25963ca16..dc487ff0470401b0613b28958ff102dbcc5103e1 100644
@@ -79,9 +79,6 @@
 /* XXX: Assume candidate image size is <= 1GB */
 #define MAX_IMAGE_SIZE 0x40000000
 
-/* Flash sg list version */
-#define SG_LIST_VERSION (1UL)
-
 /* Image status */
 enum {
        IMAGE_INVALID,
@@ -131,11 +128,15 @@ static DEFINE_MUTEX(image_data_mutex);
  */
 static inline void opal_flash_validate(void)
 {
-       struct validate_flash_t *args_buf = &validate_flash_data;
+       long ret;
+       void *buf = validate_flash_data.buf;
+       __be32 size, result;
 
-       args_buf->status = opal_validate_flash(__pa(args_buf->buf),
-                                              &(args_buf->buf_size),
-                                              &(args_buf->result));
+       ret = opal_validate_flash(__pa(buf), &size, &result);
+
+       validate_flash_data.status = ret;
+       validate_flash_data.buf_size = be32_to_cpu(size);
+       validate_flash_data.result = be32_to_cpu(result);
 }
 
 /*
@@ -267,94 +268,12 @@ static ssize_t manage_store(struct kobject *kobj,
        return count;
 }
 
-/*
- * Free sg list
- */
-static void free_sg_list(struct opal_sg_list *list)
-{
-       struct opal_sg_list *sg1;
-       while (list) {
-               sg1 = list->next;
-               kfree(list);
-               list = sg1;
-       }
-       list = NULL;
-}
-
-/*
- * Build candidate image scatter gather list
- *
- * list format:
- *   -----------------------------------
- *  |  VER (8) | Entry length in bytes  |
- *   -----------------------------------
- *  |  Pointer to next entry            |
- *   -----------------------------------
- *  |  Address of memory area 1         |
- *   -----------------------------------
- *  |  Length of memory area 1          |
- *   -----------------------------------
- *  |   .........                       |
- *   -----------------------------------
- *  |   .........                       |
- *   -----------------------------------
- *  |  Address of memory area N         |
- *   -----------------------------------
- *  |  Length of memory area N          |
- *   -----------------------------------
- */
-static struct opal_sg_list *image_data_to_sglist(void)
-{
-       struct opal_sg_list *sg1, *list = NULL;
-       void *addr;
-       int size;
-
-       addr = image_data.data;
-       size = image_data.size;
-
-       sg1 = kzalloc(PAGE_SIZE, GFP_KERNEL);
-       if (!sg1)
-               return NULL;
-
-       list = sg1;
-       sg1->num_entries = 0;
-       while (size > 0) {
-               /* Translate virtual address to physical address */
-               sg1->entry[sg1->num_entries].data =
-                       (void *)(vmalloc_to_pfn(addr) << PAGE_SHIFT);
-
-               if (size > PAGE_SIZE)
-                       sg1->entry[sg1->num_entries].length = PAGE_SIZE;
-               else
-                       sg1->entry[sg1->num_entries].length = size;
-
-               sg1->num_entries++;
-               if (sg1->num_entries >= SG_ENTRIES_PER_NODE) {
-                       sg1->next = kzalloc(PAGE_SIZE, GFP_KERNEL);
-                       if (!sg1->next) {
-                               pr_err("%s : Failed to allocate memory\n",
-                                      __func__);
-                               goto nomem;
-                       }
-
-                       sg1 = sg1->next;
-                       sg1->num_entries = 0;
-               }
-               addr += PAGE_SIZE;
-               size -= PAGE_SIZE;
-       }
-       return list;
-nomem:
-       free_sg_list(list);
-       return NULL;
-}
-
 /*
  * OPAL update flash
  */
 static int opal_flash_update(int op)
 {
-       struct opal_sg_list *sg, *list, *next;
+       struct opal_sg_list *list;
        unsigned long addr;
        int64_t rc = OPAL_PARAMETER;
 
@@ -364,30 +283,13 @@ static int opal_flash_update(int op)
                goto flash;
        }
 
-       list = image_data_to_sglist();
+       list = opal_vmalloc_to_sg_list(image_data.data, image_data.size);
        if (!list)
                goto invalid_img;
 
        /* First entry address */
        addr = __pa(list);
 
-       /* Translate sg list address to absolute */
-       for (sg = list; sg; sg = next) {
-               next = sg->next;
-               /* Don't translate NULL pointer for last entry */
-               if (sg->next)
-                       sg->next = (struct opal_sg_list *)__pa(sg->next);
-               else
-                       sg->next = NULL;
-
-               /*
-                * Convert num_entries to version/length format
-                * to satisfy OPAL.
-                */
-               sg->num_entries = (SG_LIST_VERSION << 56) |
-                       (sg->num_entries * sizeof(struct opal_sg_entry) + 16);
-       }
-
        pr_alert("FLASH: Image is %u bytes\n", image_data.size);
        pr_alert("FLASH: Image update requested\n");
        pr_alert("FLASH: Image will be updated during system reboot\n");
index 6b614726baf2add5f95237647f128c5c7119e173..d202f9bc3683f5ad0072282173ddfb510b1aec9a 100644 (file)
@@ -39,10 +39,11 @@ struct param_attr {
        struct kobj_attribute kobj_attr;
 };
 
-static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
+static ssize_t opal_get_sys_param(u32 param_id, u32 length, void *buffer)
 {
        struct opal_msg msg;
-       int ret, token;
+       ssize_t ret;
+       int token;
 
        token = opal_async_get_token_interruptible();
        if (token < 0) {
@@ -59,7 +60,7 @@ static int opal_get_sys_param(u32 param_id, u32 length, void *buffer)
 
        ret = opal_async_wait_response(token, &msg);
        if (ret) {
-               pr_err("%s: Failed to wait for the async response, %d\n",
+               pr_err("%s: Failed to wait for the async response, %zd\n",
                                __func__, ret);
                goto out_token;
        }
@@ -111,7 +112,7 @@ static ssize_t sys_param_show(struct kobject *kobj,
 {
        struct param_attr *attr = container_of(kobj_attr, struct param_attr,
                        kobj_attr);
-       int ret;
+       ssize_t ret;
 
        mutex_lock(&opal_sysparam_mutex);
        ret = opal_get_sys_param(attr->param_id, attr->param_size,
@@ -121,9 +122,10 @@ static ssize_t sys_param_show(struct kobject *kobj,
 
        memcpy(buf, param_data_buf, attr->param_size);
 
+       ret = attr->param_size;
 out:
        mutex_unlock(&opal_sysparam_mutex);
-       return ret ? ret : attr->param_size;
+       return ret;
 }
 
 static ssize_t sys_param_store(struct kobject *kobj,
@@ -131,14 +133,20 @@ static ssize_t sys_param_store(struct kobject *kobj,
 {
        struct param_attr *attr = container_of(kobj_attr, struct param_attr,
                        kobj_attr);
-       int ret;
+       ssize_t ret;
+
+        /* MAX_PARAM_DATA_LEN is sizeof(param_data_buf) */
+        if (count > MAX_PARAM_DATA_LEN)
+                count = MAX_PARAM_DATA_LEN;
 
        mutex_lock(&opal_sysparam_mutex);
        memcpy(param_data_buf, buf, count);
        ret = opal_set_sys_param(attr->param_id, attr->param_size,
                        param_data_buf);
        mutex_unlock(&opal_sysparam_mutex);
-       return ret ? ret : count;
+       if (!ret)
+               ret = count;
+       return ret;
 }
 
 void __init opal_sys_param_init(void)
@@ -214,13 +222,13 @@ void __init opal_sys_param_init(void)
        }
 
        if (of_property_read_u32_array(sysparam, "param-len", size, count)) {
-               pr_err("SYSPARAM: Missing propery param-len in the DT\n");
+               pr_err("SYSPARAM: Missing property param-len in the DT\n");
                goto out_free_perm;
        }
 
 
        if (of_property_read_u8_array(sysparam, "param-perm", perm, count)) {
-               pr_err("SYSPARAM: Missing propery param-perm in the DT\n");
+               pr_err("SYSPARAM: Missing property param-perm in the DT\n");
                goto out_free_perm;
        }
 
@@ -233,6 +241,12 @@ void __init opal_sys_param_init(void)
 
        /* For each of the parameters, populate the parameter attributes */
        for (i = 0; i < count; i++) {
+               if (size[i] > MAX_PARAM_DATA_LEN) {
+                       pr_warn("SYSPARAM: Not creating parameter %d as size "
+                               "exceeds buffer length\n", i);
+                       continue;
+               }
+
                sysfs_attr_init(&attr[i].kobj_attr.attr);
                attr[i].param_id = id[i];
                attr[i].param_size = size[i];
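A minimal standalone sketch of the clamp-and-copy pattern that sys_param_store() gains above, with example_buf and EXAMPLE_BUF_LEN standing in for the driver's param_data_buf and MAX_PARAM_DATA_LEN (all names here are illustrative, not from the patch):

#include <linux/string.h>
#include <linux/types.h>

static char example_buf[64];			/* stands in for param_data_buf */
#define EXAMPLE_BUF_LEN	sizeof(example_buf)	/* stands in for MAX_PARAM_DATA_LEN */

static ssize_t example_store(const char *buf, size_t count)
{
	/* Never copy more than the fixed staging buffer can hold. */
	if (count > EXAMPLE_BUF_LEN)
		count = EXAMPLE_BUF_LEN;
	memcpy(example_buf, buf, count);
	return count;	/* a sysfs store reports how many bytes it consumed */
}
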
index 49d2f00019e5d8092f7f32e9b3c6dfc53cceeee6..360ad80c754ce3c97ad9b9fead5ad4806e5f6666 100644 (file)
@@ -242,14 +242,14 @@ void opal_notifier_update_evt(uint64_t evt_mask,
 void opal_notifier_enable(void)
 {
        int64_t rc;
-       uint64_t evt = 0;
+       __be64 evt = 0;
 
        atomic_set(&opal_notifier_hold, 0);
 
        /* Process pending events */
        rc = opal_poll_events(&evt);
        if (rc == OPAL_SUCCESS && evt)
-               opal_do_notifier(evt);
+               opal_do_notifier(be64_to_cpu(evt));
 }
 
 void opal_notifier_disable(void)
@@ -529,7 +529,7 @@ static irqreturn_t opal_interrupt(int irq, void *data)
 
        opal_handle_interrupt(virq_to_hw(irq), &events);
 
-       opal_do_notifier(events);
+       opal_do_notifier(be64_to_cpu(events));
 
        return IRQ_HANDLED;
 }
@@ -638,3 +638,66 @@ void opal_shutdown(void)
 
 /* Export this so that test modules can use it */
 EXPORT_SYMBOL_GPL(opal_invalid_call);
+
+/* Convert a region of vmalloc memory to an opal sg list */
+struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
+                                            unsigned long vmalloc_size)
+{
+       struct opal_sg_list *sg, *first = NULL;
+       unsigned long i = 0;
+
+       sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
+       if (!sg)
+               goto nomem;
+
+       first = sg;
+
+       while (vmalloc_size > 0) {
+               uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
+               uint64_t length = min(vmalloc_size, PAGE_SIZE);
+
+               sg->entry[i].data = cpu_to_be64(data);
+               sg->entry[i].length = cpu_to_be64(length);
+               i++;
+
+               if (i >= SG_ENTRIES_PER_NODE) {
+                       struct opal_sg_list *next;
+
+                       next = kzalloc(PAGE_SIZE, GFP_KERNEL);
+                       if (!next)
+                               goto nomem;
+
+                       sg->length = cpu_to_be64(
+                                       i * sizeof(struct opal_sg_entry) + 16);
+                       i = 0;
+                       sg->next = cpu_to_be64(__pa(next));
+                       sg = next;
+               }
+
+               vmalloc_addr += length;
+               vmalloc_size -= length;
+       }
+
+       sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);
+
+       return first;
+
+nomem:
+       pr_err("%s : Failed to allocate memory\n", __func__);
+       opal_free_sg_list(first);
+       return NULL;
+}
+
+void opal_free_sg_list(struct opal_sg_list *sg)
+{
+       while (sg) {
+               uint64_t next = be64_to_cpu(sg->next);
+
+               kfree(sg);
+
+               if (next)
+                       sg = __va(next);
+               else
+                       sg = NULL;
+       }
+}
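A caller-side sketch of the two helpers just added, mirroring the opal-flash.c hunk earlier in this diff; consume_sg_list() is a hypothetical stand-in for whatever firmware call ultimately takes the list, and the errno mapping is illustrative:

/* Hypothetical consumer; takes the physical address of the first sg node. */
extern int64_t consume_sg_list(unsigned long first_node_pa);

static int send_vmalloc_buffer(void *buf, unsigned long size)
{
	struct opal_sg_list *list;	/* from <asm/opal.h> */
	int64_t rc;

	list = opal_vmalloc_to_sg_list(buf, size);
	if (!list)
		return -ENOMEM;

	/* Node links are already physical (__pa) and big-endian, per the helper. */
	rc = consume_sg_list(__pa(list));

	opal_free_sg_list(list);
	return rc == OPAL_SUCCESS ? 0 : -EIO;
}
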
index 3b2b4fb3585b6b9fac45878041772285591d1d63..98824aa991731882cca87f806f464198f356ff26 100644 (file)
@@ -343,7 +343,6 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
                                pci_name(dev));
                        continue;
                }
-               pci_dev_get(dev);
                pdn->pcidev = dev;
                pdn->pe_number = pe->pe_number;
                pe->dma_weight += pnv_ioda_dma_weight(dev);
@@ -462,7 +461,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
 
        pe = &phb->ioda.pe_array[pdn->pe_number];
        WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
-       set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
+       set_iommu_table_base(&pdev->dev, &pe->tce32_table);
 }
 
 static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
index 61cf8fa9c61b50489009b94c99c993fffaddde28..8723d32632f55b6eb49b25d55711140f60a95fe7 100644 (file)
@@ -162,18 +162,62 @@ static void pnv_shutdown(void)
 }
 
 #ifdef CONFIG_KEXEC
+static void pnv_kexec_wait_secondaries_down(void)
+{
+       int my_cpu, i, notified = -1;
+
+       my_cpu = get_cpu();
+
+       for_each_online_cpu(i) {
+               uint8_t status;
+               int64_t rc;
+
+               if (i == my_cpu)
+                       continue;
+
+               for (;;) {
+                       rc = opal_query_cpu_status(get_hard_smp_processor_id(i),
+                                                  &status);
+                       if (rc != OPAL_SUCCESS || status != OPAL_THREAD_STARTED)
+                               break;
+                       barrier();
+                       if (i != notified) {
+                               printk(KERN_INFO "kexec: waiting for cpu %d "
+                                      "(physical %d) to enter OPAL\n",
+                                      i, paca[i].hw_cpu_id);
+                               notified = i;
+                       }
+               }
+       }
+}
+
 static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 {
        xics_kexec_teardown_cpu(secondary);
 
-       /* Return secondary CPUs to firmware on OPAL v3 */
-       if (firmware_has_feature(FW_FEATURE_OPALv3) && secondary) {
+       /* On OPAL v3, we return all CPUs to firmware */
+
+       if (!firmware_has_feature(FW_FEATURE_OPALv3))
+               return;
+
+       if (secondary) {
+               /* Return secondary CPUs to firmware on OPAL v3 */
                mb();
                get_paca()->kexec_state = KEXEC_STATE_REAL_MODE;
                mb();
 
                /* Return the CPU to OPAL */
                opal_return_cpu();
+       } else if (crash_shutdown) {
+               /*
+                * On crash, we don't wait for secondaries to go
+                * down as they might be unreachable or hung, so
+                * instead we just wait a bit and move on.
+                */
+               mdelay(1);
+       } else {
+               /* Primary waits for the secondaries to have reached OPAL */
+               pnv_kexec_wait_secondaries_down();
        }
 }
 #endif /* CONFIG_KEXEC */
index 908672bdcea6b2c77d75763a05d10f476e2b2d75..bf5fcd452168c6056492115c0232b40f25a2e407 100644 (file)
@@ -30,6 +30,7 @@
 #include <asm/cputhreads.h>
 #include <asm/xics.h>
 #include <asm/opal.h>
+#include <asm/runlatch.h>
 
 #include "powernv.h"
 
@@ -156,7 +157,9 @@ static void pnv_smp_cpu_kill_self(void)
         */
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
        while (!generic_check_cpu_restart(cpu)) {
+               ppc64_runlatch_off();
                power7_nap();
+               ppc64_runlatch_on();
                if (!generic_check_cpu_restart(cpu)) {
                        DBG("CPU%d Unexpected exit while offline !\n", cpu);
                        /* We may be getting an IPI, so we re-enable
index 9b8e05078a63e73a2993cc89890793353a45f9ea..20d62975856fb7fa5795a566629bbb69aa7a3139 100644 (file)
@@ -88,13 +88,14 @@ void set_default_offline_state(int cpu)
 
 static void rtas_stop_self(void)
 {
-       struct rtas_args args = {
-               .token = cpu_to_be32(rtas_stop_self_token),
+       static struct rtas_args args = {
                .nargs = 0,
                .nret = 1,
                .rets = &args.args[0],
        };
 
+       args.token = cpu_to_be32(rtas_stop_self_token);
+
        local_irq_disable();
 
        BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
index 573b488fc48b8a9b79674806d27599993c96b362..7f75c94af822c40322d8a4a751e983ad0e2bcdae 100644 (file)
@@ -100,10 +100,10 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
 
        start_pfn = base >> PAGE_SHIFT;
 
-       if (!pfn_valid(start_pfn)) {
-               memblock_remove(base, memblock_size);
-               return 0;
-       }
+       lock_device_hotplug();
+
+       if (!pfn_valid(start_pfn))
+               goto out;
 
        block_sz = memory_block_size_bytes();
        sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
@@ -114,8 +114,10 @@ static int pseries_remove_memblock(unsigned long base, unsigned int memblock_siz
                base += MIN_MEMORY_BLOCK_SIZE;
        }
 
+out:
        /* Update memory regions for memory remove */
        memblock_remove(base, memblock_size);
+       unlock_device_hotplug();
        return 0;
 }
 
index 64603a10b86313aace9d570494fcd86853357253..4914fd3f41eca710778ceaee742f29fee5137faf 100644 (file)
@@ -1058,7 +1058,7 @@ static int __init apm821xx_pciex_core_init(struct device_node *np)
        return 1;
 }
 
-static int apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
+static int __init apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
 {
        u32 val;
 
index 9c36dc398f9070afb4d6151d214623d1cbcf9f4a..452d3ebd9d0fba3b513a6978b096bd22b27c1b46 100644 (file)
@@ -276,7 +276,6 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter)
        case BPF_S_LD_W_IND:
        case BPF_S_LD_H_IND:
        case BPF_S_LD_B_IND:
-       case BPF_S_LDX_B_MSH:
        case BPF_S_LD_IMM:
        case BPF_S_LD_MEM:
        case BPF_S_MISC_TXA:
index d1b7c377a234e900b0af97d7a784e5cfeedd64f0..ce6ad7e6a7d7c7ba743884bbee2bcf77262f814d 100644 (file)
@@ -83,7 +83,9 @@ else
         KBUILD_CFLAGS += -m64
 
         # Don't autogenerate traditional x87, MMX or SSE instructions
-        KBUILD_CFLAGS += -mno-mmx -mno-sse -mno-80387 -mno-fp-ret-in-387
+        KBUILD_CFLAGS += -mno-mmx -mno-sse
+        KBUILD_CFLAGS += $(call cc-option,-mno-80387)
+        KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
 
        # Use -mpreferred-stack-boundary=3 if supported.
        KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
index 6ad4658de7056e02f104b505f35910587ec712f3..d23aa82e7a7bc25c702be004f804a0c9d02c15f7 100644 (file)
@@ -3425,6 +3425,11 @@ int get_nr_irqs_gsi(void)
        return nr_irqs_gsi;
 }
 
+unsigned int arch_dynirq_lower_bound(unsigned int from)
+{
+       return from < nr_irqs_gsi ? nr_irqs_gsi : from;
+}
+
 int __init arch_probe_nr_irqs(void)
 {
        int nr;
index 7c87424d4140ee488eab90f8c4828a893a7744f6..619f7699487aa1ec60f5a2687bede8e19d0a2c7a 100644 (file)
@@ -543,7 +543,8 @@ static int rapl_cpu_prepare(int cpu)
        if (phys_id < 0)
                return -1;
 
-       if (!rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
+       /* protect rdmsrl() to handle virtualization */
+       if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &msr_rapl_power_unit_bits))
                return -1;
 
        pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu));
index f6584a90aba346566d38b6df763a9b0669fd733f..5edc34b5b9514df1f63af487a314ab4053d197a2 100644 (file)
@@ -26,6 +26,9 @@
 
 #define TOPOLOGY_REGISTER_OFFSET 0x10
 
+/* Flag below is initialized once during vSMP PCI initialization. */
+static int irq_routing_comply = 1;
+
 #if defined CONFIG_PCI && defined CONFIG_PARAVIRT
 /*
  * Interrupt control on vSMPowered systems:
@@ -101,6 +104,10 @@ static void __init set_vsmp_pv_ops(void)
 #ifdef CONFIG_SMP
        if (cap & ctl & BIT(8)) {
                ctl &= ~BIT(8);
+
+               /* Interrupt routing set to ignore */
+               irq_routing_comply = 0;
+
 #ifdef CONFIG_PROC_FS
                /* Don't let users change irq affinity via procfs */
                no_irq_affinity = 1;
@@ -218,7 +225,9 @@ static void vsmp_apic_post_init(void)
 {
        /* need to update phys_pkg_id */
        apic->phys_pkg_id = apicid_phys_pkg_id;
-       apic->vector_allocation_domain = fill_vector_allocation_domain;
+
+       if (!irq_routing_comply)
+               apic->vector_allocation_domain = fill_vector_allocation_domain;
 }
 
 void __init vsmp_init(void)
index 1f68c5831924d15dd741032cde2fafc46aae50ab..33e8c028842fb4b0b59bc269a973b195a104cdf8 100644 (file)
@@ -503,7 +503,7 @@ static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
                                [number##_HIGH] = VMCS12_OFFSET(name)+4
 
 
-static const unsigned long shadow_read_only_fields[] = {
+static unsigned long shadow_read_only_fields[] = {
        /*
         * We do NOT shadow fields that are modified when L0
         * traps and emulates any vmx instruction (e.g. VMPTRLD,
@@ -526,10 +526,10 @@ static const unsigned long shadow_read_only_fields[] = {
        GUEST_LINEAR_ADDRESS,
        GUEST_PHYSICAL_ADDRESS
 };
-static const int max_shadow_read_only_fields =
+static int max_shadow_read_only_fields =
        ARRAY_SIZE(shadow_read_only_fields);
 
-static const unsigned long shadow_read_write_fields[] = {
+static unsigned long shadow_read_write_fields[] = {
        GUEST_RIP,
        GUEST_RSP,
        GUEST_CR0,
@@ -558,7 +558,7 @@ static const unsigned long shadow_read_write_fields[] = {
        HOST_FS_SELECTOR,
        HOST_GS_SELECTOR
 };
-static const int max_shadow_read_write_fields =
+static int max_shadow_read_write_fields =
        ARRAY_SIZE(shadow_read_write_fields);
 
 static const unsigned short vmcs_field_to_offset_table[] = {
@@ -3009,6 +3009,41 @@ static void free_kvm_area(void)
        }
 }
 
+static void init_vmcs_shadow_fields(void)
+{
+       int i, j;
+
+       /* No checks for read only fields yet */
+
+       for (i = j = 0; i < max_shadow_read_write_fields; i++) {
+               switch (shadow_read_write_fields[i]) {
+               case GUEST_BNDCFGS:
+                       if (!vmx_mpx_supported())
+                               continue;
+                       break;
+               default:
+                       break;
+               }
+
+               if (j < i)
+                       shadow_read_write_fields[j] =
+                               shadow_read_write_fields[i];
+               j++;
+       }
+       max_shadow_read_write_fields = j;
+
+       /* shadowed fields that the guest can access without a vmexit */
+       for (i = 0; i < max_shadow_read_write_fields; i++) {
+               clear_bit(shadow_read_write_fields[i],
+                         vmx_vmwrite_bitmap);
+               clear_bit(shadow_read_write_fields[i],
+                         vmx_vmread_bitmap);
+       }
+       for (i = 0; i < max_shadow_read_only_fields; i++)
+               clear_bit(shadow_read_only_fields[i],
+                         vmx_vmread_bitmap);
+}
+
 static __init int alloc_kvm_area(void)
 {
        int cpu;
@@ -3039,6 +3074,8 @@ static __init int hardware_setup(void)
                enable_vpid = 0;
        if (!cpu_has_vmx_shadow_vmcs())
                enable_shadow_vmcs = 0;
+       if (enable_shadow_vmcs)
+               init_vmcs_shadow_fields();
 
        if (!cpu_has_vmx_ept() ||
            !cpu_has_vmx_ept_4levels()) {
@@ -8803,14 +8840,6 @@ static int __init vmx_init(void)
 
        memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
        memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
-       /* shadowed read/write fields */
-       for (i = 0; i < max_shadow_read_write_fields; i++) {
-               clear_bit(shadow_read_write_fields[i], vmx_vmwrite_bitmap);
-               clear_bit(shadow_read_write_fields[i], vmx_vmread_bitmap);
-       }
-       /* shadowed read only fields */
-       for (i = 0; i < max_shadow_read_only_fields; i++)
-               clear_bit(shadow_read_only_fields[i], vmx_vmread_bitmap);
 
        /*
         * Allow direct access to the PC debug port (it is often used for I/O
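init_vmcs_shadow_fields() above combines two steps: compacting the field table in place so unsupported fields drop out, then clearing the corresponding read/write bitmap bits. A sketch of just the compaction idiom on a plain array (compact_fields() and keep_field() are illustrative helpers, not kernel APIs):

#include <linux/types.h>

/* Illustrative predicate; the real code checks CPU support (e.g. MPX). */
static bool keep_field(unsigned long field)
{
	return field != 0;
}

/* Filter 'fields' in place, preserving order; returns the new length. */
static int compact_fields(unsigned long *fields, int n)
{
	int i, j;

	for (i = j = 0; i < n; i++) {
		if (!keep_field(fields[i]))
			continue;
		if (j < i)		/* only move entries once a gap has opened */
			fields[j] = fields[i];
		j++;
	}
	return j;
}
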
index 02d6d29a63c13716168c0a68b4bdfe55c71ef9b0..3a617af60d465196bb894cebdc4042ccd4e4a92f 100644 (file)
@@ -14,6 +14,7 @@ config XTENSA
        select GENERIC_PCI_IOMAP
        select ARCH_WANT_IPC_PARSE_VERSION
        select ARCH_WANT_OPTIONAL_GPIOLIB
+       select BUILDTIME_EXTABLE_SORT
        select CLONE_BACKWARDS
        select IRQ_DOMAIN
        select HAVE_OPROFILE
@@ -189,6 +190,24 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
 
          If in doubt, say Y.
 
+config HIGHMEM
+       bool "High Memory Support"
+       help
+         Linux can use the full amount of RAM in the system by
+         default. However, the default MMUv2 setup only maps the
+         lowermost 128 MB of memory linearly to the areas starting
+         at 0xd0000000 (cached) and 0xd8000000 (uncached).
+         When there is more than 128 MB of memory in the system, not
+         all of it can be "permanently mapped" by the kernel.
+         The physical memory that's not permanently mapped is called
+         "high memory".
+
+         If you are compiling a kernel which will never run on a
+         machine with more than 128 MB total physical RAM, answer
+         N here.
+
+         If unsure, say Y.
+
 endmenu
 
 config XTENSA_CALIBRATE_CCOUNT
@@ -224,7 +243,6 @@ choice
 
 config XTENSA_PLATFORM_ISS
        bool "ISS"
-       depends on TTY
        select XTENSA_CALIBRATE_CCOUNT
        select SERIAL_CONSOLE
        help
diff --git a/arch/xtensa/boot/dts/kc705.dts b/arch/xtensa/boot/dts/kc705.dts
new file mode 100644 (file)
index 0000000..742a347
--- /dev/null
@@ -0,0 +1,11 @@
+/dts-v1/;
+/include/ "xtfpga.dtsi"
+/include/ "xtfpga-flash-128m.dtsi"
+
+/ {
+       compatible = "cdns,xtensa-kc705";
+       memory@0 {
+               device_type = "memory";
+               reg = <0x00000000 0x08000000>;
+       };
+};
diff --git a/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi b/arch/xtensa/boot/dts/xtfpga-flash-128m.dtsi
new file mode 100644 (file)
index 0000000..d3a88e0
--- /dev/null
@@ -0,0 +1,28 @@
+/ {
+       soc {
+               flash: flash@00000000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "cfi-flash";
+                       reg = <0x00000000 0x08000000>;
+                       bank-width = <2>;
+                       device-width = <2>;
+                       partition@0x0 {
+                               label = "data";
+                               reg = <0x00000000 0x06000000>;
+                       };
+                       partition@0x6000000 {
+                               label = "boot loader area";
+                               reg = <0x06000000 0x00800000>;
+                       };
+                       partition@0x6800000 {
+                               label = "kernel image";
+                               reg = <0x06800000 0x017e0000>;
+                       };
+                       partition@0x7fe0000 {
+                               label = "boot environment";
+                               reg = <0x07fe0000 0x00020000>;
+                       };
+               };
+        };
+};
index e5703c7beeb6dad04d2006929984b0fd8a7c3289..1d97203c18e7f787b5696b4468e4901c9b88fcbb 100644 (file)
@@ -1,26 +1,28 @@
 / {
-       flash: flash@f8000000 {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               compatible = "cfi-flash";
-               reg = <0xf8000000 0x01000000>;
-               bank-width = <2>;
-               device-width = <2>;
-               partition@0x0 {
-                       label = "boot loader area";
-                       reg = <0x00000000 0x00400000>;
+       soc {
+               flash: flash@08000000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "cfi-flash";
+                       reg = <0x08000000 0x01000000>;
+                       bank-width = <2>;
+                       device-width = <2>;
+                       partition@0x0 {
+                               label = "boot loader area";
+                               reg = <0x00000000 0x00400000>;
+                       };
+                       partition@0x400000 {
+                               label = "kernel image";
+                               reg = <0x00400000 0x00600000>;
+                       };
+                       partition@0xa00000 {
+                               label = "data";
+                               reg = <0x00a00000 0x005e0000>;
+                       };
+                       partition@0xfe0000 {
+                               label = "boot environment";
+                               reg = <0x00fe0000 0x00020000>;
+                       };
                };
-               partition@0x400000 {
-                       label = "kernel image";
-                       reg = <0x00400000 0x00600000>;
-               };
-               partition@0xa00000 {
-                       label = "data";
-                       reg = <0x00a00000 0x005e0000>;
-               };
-               partition@0xfe0000 {
-                       label = "boot environment";
-                       reg = <0x00fe0000 0x00020000>;
-               };
-        };
+       };
 };
index 6f9c10d6b689a9696d17296025a21a1167322fbc..d1c621ca8be10cba5565fa9ce1e7b82cabaef973 100644 (file)
@@ -1,18 +1,20 @@
 / {
-       flash: flash@f8000000 {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               compatible = "cfi-flash";
-               reg = <0xf8000000 0x00400000>;
-               bank-width = <2>;
-               device-width = <2>;
-               partition@0x0 {
-                       label = "boot loader area";
-                       reg = <0x00000000 0x003f0000>;
+       soc {
+               flash: flash@08000000 {
+                       #address-cells = <1>;
+                       #size-cells = <1>;
+                       compatible = "cfi-flash";
+                       reg = <0x08000000 0x00400000>;
+                       bank-width = <2>;
+                       device-width = <2>;
+                       partition@0x0 {
+                               label = "boot loader area";
+                               reg = <0x00000000 0x003f0000>;
+                       };
+                       partition@0x3f0000 {
+                               label = "boot environment";
+                               reg = <0x003f0000 0x00010000>;
+                       };
                };
-               partition@0x3f0000 {
-                       label = "boot environment";
-                       reg = <0x003f0000 0x00010000>;
-               };
-        };
+       };
 };
index e7370b11348e8d06c113d420704664740dbdec9b..dec9178840f695f0bcdd1de3cd5b17339fce8627 100644 (file)
                };
        };
 
-       serial0: serial@fd050020 {
-               device_type = "serial";
-               compatible = "ns16550a";
-               no-loopback-test;
-               reg = <0xfd050020 0x20>;
-               reg-shift = <2>;
-               interrupts = <0 1>; /* external irq 0 */
-               clocks = <&osc>;
-       };
+       soc {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "simple-bus";
+               ranges = <0x00000000 0xf0000000 0x10000000>;
 
-       enet0: ethoc@fd030000 {
-               compatible = "opencores,ethoc";
-               reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
-               interrupts = <1 1>; /* external irq 1 */
-               local-mac-address = [00 50 c2 13 6f 00];
-               clocks = <&osc>;
+               serial0: serial@0d050020 {
+                       device_type = "serial";
+                       compatible = "ns16550a";
+                       no-loopback-test;
+                       reg = <0x0d050020 0x20>;
+                       reg-shift = <2>;
+                       interrupts = <0 1>; /* external irq 0 */
+                       clocks = <&osc>;
+               };
+
+               enet0: ethoc@0d030000 {
+                       compatible = "opencores,ethoc";
+                       reg = <0x0d030000 0x4000 0x0d800000 0x4000>;
+                       interrupts = <1 1>; /* external irq 1 */
+                       local-mac-address = [00 50 c2 13 6f 00];
+                       clocks = <&osc>;
+               };
        };
 };
index 23392c5630ce9939b04bae3ff6de4fd67417852a..892aab399ac873c885953e24430a3bc741aca6ed 100644 (file)
@@ -37,23 +37,14 @@ typedef struct bp_tag {
        unsigned long data[0];  /* data */
 } bp_tag_t;
 
-typedef struct meminfo {
+struct bp_meminfo {
        unsigned long type;
        unsigned long start;
        unsigned long end;
-} meminfo_t;
-
-#define SYSMEM_BANKS_MAX 5
+};
 
 #define MEMORY_TYPE_CONVENTIONAL       0x1000
 #define MEMORY_TYPE_NONE               0x2000
 
-typedef struct sysmem_info {
-       int nr_banks;
-       meminfo_t bank[SYSMEM_BANKS_MAX];
-} sysmem_info_t;
-
-extern sysmem_info_t sysmem;
-
 #endif
 #endif
diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h
new file mode 100644 (file)
index 0000000..9f6c33d
--- /dev/null
@@ -0,0 +1,58 @@
+/*
+ * fixmap.h: compile-time virtual memory allocation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1998 Ingo Molnar
+ *
+ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
+ */
+
+#ifndef _ASM_FIXMAP_H
+#define _ASM_FIXMAP_H
+
+#include <asm/pgtable.h>
+#ifdef CONFIG_HIGHMEM
+#include <linux/threads.h>
+#include <asm/kmap_types.h>
+#endif
+
+/*
+ * Here we define all the compile-time 'special' virtual
+ * addresses. The point is to have a constant address at
+ * compile time, but to set the physical address only
+ * in the boot process. We allocate these special addresses
+ * from the end of the consistent memory region backwards.
+ * This also lets us do fail-safe vmalloc(): we can
+ * guarantee that these special addresses and
+ * vmalloc()-ed addresses never overlap.
+ *
+ * These 'compile-time allocated' memory buffers are
+ * fixed-size 4k pages (or larger if used with an increment
+ * higher than 1). Use fixmap_set(idx, phys) to associate
+ * physical memory with fixmap indices.
+ */
+enum fixed_addresses {
+#ifdef CONFIG_HIGHMEM
+       /* reserved pte's for temporary kernel mappings */
+       FIX_KMAP_BEGIN,
+       FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
+#endif
+       __end_of_fixed_addresses
+};
+
+#define FIXADDR_TOP     (VMALLOC_START - PAGE_SIZE)
+#define FIXADDR_SIZE   (__end_of_fixed_addresses << PAGE_SHIFT)
+#define FIXADDR_START  ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK)
+
+#include <asm-generic/fixmap.h>
+
+#define kmap_get_fixmap_pte(vaddr) \
+       pte_offset_kernel( \
+               pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), \
+               (vaddr) \
+       )
+
+#endif
index 80be15124697d8e85a23859a466e97f178936010..2653ef5d55f1c9ed92d35d50f91136732334ec1b 100644 (file)
@@ -6,11 +6,54 @@
  * this archive for more details.
  *
  * Copyright (C) 2003 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
  */
 
 #ifndef _XTENSA_HIGHMEM_H
 #define _XTENSA_HIGHMEM_H
 
-extern void flush_cache_kmaps(void);
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/kmap_types.h>
+#include <asm/pgtable.h>
+
+#define PKMAP_BASE             (FIXADDR_START - PMD_SIZE)
+#define LAST_PKMAP             PTRS_PER_PTE
+#define LAST_PKMAP_MASK                (LAST_PKMAP - 1)
+#define PKMAP_NR(virt)         (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
+#define PKMAP_ADDR(nr)         (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+#define kmap_prot              PAGE_KERNEL
+
+extern pte_t *pkmap_page_table;
+
+void *kmap_high(struct page *page);
+void kunmap_high(struct page *page);
+
+static inline void *kmap(struct page *page)
+{
+       BUG_ON(in_interrupt());
+       if (!PageHighMem(page))
+               return page_address(page);
+       return kmap_high(page);
+}
+
+static inline void kunmap(struct page *page)
+{
+       BUG_ON(in_interrupt());
+       if (!PageHighMem(page))
+               return;
+       kunmap_high(page);
+}
+
+static inline void flush_cache_kmaps(void)
+{
+       flush_cache_all();
+}
+
+void *kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
+
+void kmap_init(void);
 
 #endif
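To make the HIGHMEM help text and the kmap()/kunmap() inlines above concrete, a hedged sketch of how code touches a page that may live in high memory; zero_user_page_slow() is illustrative:

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: map a possibly-high page, touch it, unmap it. */
static void zero_user_page_slow(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep; lowmem pages short-circuit */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);			/* no-op for lowmem, drops the pkmap otherwise */
}
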
index 216446295ada686ccb4b454319b2b45e85d96720..4b0ca35a93b1a731bf0ce2c1db32f9fabb890fef 100644 (file)
@@ -310,6 +310,10 @@ set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pteval)
        update_pte(ptep, pteval);
 }
 
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+       update_pte(ptep, pteval);
+}
 
 static inline void
 set_pmd(pmd_t *pmdp, pmd_t pmdval)
diff --git a/arch/xtensa/include/asm/sysmem.h b/arch/xtensa/include/asm/sysmem.h
new file mode 100644 (file)
index 0000000..c015c5c
--- /dev/null
@@ -0,0 +1,38 @@
+/*
+ * sysmem-related prototypes.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#ifndef _XTENSA_SYSMEM_H
+#define _XTENSA_SYSMEM_H
+
+#define SYSMEM_BANKS_MAX 31
+
+struct meminfo {
+       unsigned long start;
+       unsigned long end;
+};
+
+/*
+ * Bank array is sorted by .start.
+ * Banks don't overlap and there's at least one page gap
+ * between adjacent bank entries.
+ */
+struct sysmem_info {
+       int nr_banks;
+       struct meminfo bank[SYSMEM_BANKS_MAX];
+};
+
+extern struct sysmem_info sysmem;
+
+int add_sysmem_bank(unsigned long start, unsigned long end);
+int mem_reserve(unsigned long, unsigned long, int);
+void bootmem_init(void);
+void zones_init(void);
+
+#endif /* _XTENSA_SYSMEM_H */
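A hedged sketch of the interface declared above, assuming the union/carve-out semantics implemented for add_sysmem_bank() and mem_reserve() in the mm/init.c hunks later in this diff; the addresses are made up for illustration:

#include <linux/init.h>
#include <asm/sysmem.h>

static void __init sysmem_layout_example(void)
{
	/* Two overlapping banks are merged into a single 256 MB bank at 0. */
	add_sysmem_bank(0x00000000, 0x08000000);
	add_sysmem_bank(0x04000000, 0x10000000);

	/* Reserving 1 MB in the middle splits the bank in two. */
	mem_reserve(0x06000000, 0x06100000, 1);

	/* sysmem now holds [0x00000000, 0x06000000) and [0x06100000, 0x10000000). */
}
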
index fc34274ce41bc81b3ddaa167fe887179f04c8ad7..06875feb27c28ebb870820706dc286cd9740f1ce 100644 (file)
@@ -36,6 +36,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma,
                unsigned long page);
 void local_flush_tlb_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifdef CONFIG_SMP
 
@@ -44,12 +45,7 @@ void flush_tlb_mm(struct mm_struct *);
 void flush_tlb_page(struct vm_area_struct *, unsigned long);
 void flush_tlb_range(struct vm_area_struct *, unsigned long,
                unsigned long);
-
-static inline void flush_tlb_kernel_range(unsigned long start,
-               unsigned long end)
-{
-       flush_tlb_all();
-}
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #else /* !CONFIG_SMP */
 
@@ -58,7 +54,8 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 #define flush_tlb_page(vma, page)         local_flush_tlb_page(vma, page)
 #define flush_tlb_range(vma, vmaddr, end)  local_flush_tlb_range(vma, vmaddr, \
                                                                 end)
-#define flush_tlb_kernel_range(start, end) local_flush_tlb_all()
+#define flush_tlb_kernel_range(start, end) local_flush_tlb_kernel_range(start, \
+                                                                       end)
 
 #endif /* CONFIG_SMP */
 
index 84fe931bb60e1f012417d202d002813b12cf68aa..9757bb74e53296f66372dd08506d94163e8801cc 100644 (file)
@@ -50,6 +50,7 @@
 #include <asm/param.h>
 #include <asm/traps.h>
 #include <asm/smp.h>
+#include <asm/sysmem.h>
 
 #include <platform/hardware.h>
 
@@ -88,12 +89,6 @@ static char __initdata command_line[COMMAND_LINE_SIZE];
 static char default_command_line[COMMAND_LINE_SIZE] __initdata = CONFIG_CMDLINE;
 #endif
 
-sysmem_info_t __initdata sysmem;
-
-extern int mem_reserve(unsigned long, unsigned long, int);
-extern void bootmem_init(void);
-extern void zones_init(void);
-
 /*
  * Boot parameter parsing.
  *
@@ -113,31 +108,14 @@ typedef struct tagtable {
 
 /* parse current tag */
 
-static int __init add_sysmem_bank(unsigned long type, unsigned long start,
-               unsigned long end)
-{
-       if (sysmem.nr_banks >= SYSMEM_BANKS_MAX) {
-               printk(KERN_WARNING
-                               "Ignoring memory bank 0x%08lx size %ldKB\n",
-                               start, end - start);
-               return -EINVAL;
-       }
-       sysmem.bank[sysmem.nr_banks].type  = type;
-       sysmem.bank[sysmem.nr_banks].start = PAGE_ALIGN(start);
-       sysmem.bank[sysmem.nr_banks].end   = end & PAGE_MASK;
-       sysmem.nr_banks++;
-
-       return 0;
-}
-
 static int __init parse_tag_mem(const bp_tag_t *tag)
 {
-       meminfo_t *mi = (meminfo_t *)(tag->data);
+       struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
 
        if (mi->type != MEMORY_TYPE_CONVENTIONAL)
                return -1;
 
-       return add_sysmem_bank(mi->type, mi->start, mi->end);
+       return add_sysmem_bank(mi->start, mi->end);
 }
 
 __tagtable(BP_TAG_MEMORY, parse_tag_mem);
@@ -146,8 +124,8 @@ __tagtable(BP_TAG_MEMORY, parse_tag_mem);
 
 static int __init parse_tag_initrd(const bp_tag_t* tag)
 {
-       meminfo_t* mi;
-       mi = (meminfo_t*)(tag->data);
+       struct bp_meminfo *mi = (struct bp_meminfo *)(tag->data);
+
        initrd_start = (unsigned long)__va(mi->start);
        initrd_end = (unsigned long)__va(mi->end);
 
@@ -255,7 +233,7 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
                return;
 
        size &= PAGE_MASK;
-       add_sysmem_bank(MEMORY_TYPE_CONVENTIONAL, base, base + size);
+       add_sysmem_bank(base, base + size);
 }
 
 void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align)
@@ -292,8 +270,6 @@ device_initcall(xtensa_device_probe);
 
 void __init init_arch(bp_tag_t *bp_start)
 {
-       sysmem.nr_banks = 0;
-
        /* Parse boot parameters */
 
        if (bp_start)
@@ -304,10 +280,9 @@ void __init init_arch(bp_tag_t *bp_start)
 #endif
 
        if (sysmem.nr_banks == 0) {
-               sysmem.nr_banks = 1;
-               sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
-               sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
-                                    + PLATFORM_DEFAULT_MEM_SIZE;
+               add_sysmem_bank(PLATFORM_DEFAULT_MEM_START,
+                               PLATFORM_DEFAULT_MEM_START +
+                               PLATFORM_DEFAULT_MEM_SIZE);
        }
 
 #ifdef CONFIG_CMDLINE_BOOL
@@ -487,7 +462,7 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_BLK_DEV_INITRD
        if (initrd_start < initrd_end) {
                initrd_is_mapped = mem_reserve(__pa(initrd_start),
-                                              __pa(initrd_end), 0);
+                                              __pa(initrd_end), 0) == 0;
                initrd_below_start_ok = 1;
        } else {
                initrd_start = 0;
@@ -532,6 +507,7 @@ void __init setup_arch(char **cmdline_p)
                    __pa(&_Level6InterruptVector_text_end), 0);
 #endif
 
+       parse_early_param();
        bootmem_init();
 
        unflatten_and_copy_device_tree();
index aa8bd8717927185bd5b422316885ddaa98d889f7..40b5a3771fb063fb02ffaa7fe07a426a3a684677 100644 (file)
@@ -496,6 +496,21 @@ void flush_tlb_range(struct vm_area_struct *vma,
        on_each_cpu(ipi_flush_tlb_range, &fd, 1);
 }
 
+static void ipi_flush_tlb_kernel_range(void *arg)
+{
+       struct flush_data *fd = arg;
+       local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       struct flush_data fd = {
+               .addr1 = start,
+               .addr2 = end,
+       };
+       on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
+}
+
 /* Cache flush functions */
 
 static void ipi_flush_cache_all(void *arg)
index 80b33ed51f31174fd41a53bebd957c140517d8f9..4d2872fd9bb5ebf89bb15127841e5ae28e8d9b58 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/in6.h>
 
 #include <asm/uaccess.h>
+#include <asm/cacheflush.h>
 #include <asm/checksum.h>
 #include <asm/dma.h>
 #include <asm/io.h>
@@ -105,6 +106,7 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
  * Architecture-specific symbols
  */
 EXPORT_SYMBOL(__xtensa_copy_user);
+EXPORT_SYMBOL(__invalidate_icache_range);
 
 /*
  * Kernel hacking ...
@@ -127,3 +129,8 @@ EXPORT_SYMBOL(common_exception_return);
 #ifdef CONFIG_FUNCTION_TRACER
 EXPORT_SYMBOL(_mcount);
 #endif
+
+EXPORT_SYMBOL(__invalidate_dcache_range);
+#if XCHAL_DCACHE_IS_WRITEBACK
+EXPORT_SYMBOL(__flush_dcache_range);
+#endif
index f0b646d2f843feb5945601dbe65966a4a71cfec9..f54f78e24d7b5e72733f23da806272a4cba6b882 100644 (file)
@@ -4,3 +4,4 @@
 
 obj-y                  := init.o cache.o misc.o
 obj-$(CONFIG_MMU)      += fault.o mmu.o tlb.o
+obj-$(CONFIG_HIGHMEM)  += highmem.o
index ba4c47f291b17843047a410549b09cb59ba52967..63cbb867dadd64d8907176f1bd60420f8a41217a 100644 (file)
  *
  */
 
+#if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM)
+#error "HIGHMEM is not supported on cores with aliasing cache."
+#endif
+
 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
 
 /*
@@ -179,10 +183,11 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 #else
        if (!PageReserved(page) && !test_bit(PG_arch_1, &page->flags)
            && (vma->vm_flags & VM_EXEC) != 0) {
-               unsigned long paddr = (unsigned long) page_address(page);
+               unsigned long paddr = (unsigned long)kmap_atomic(page);
                __flush_dcache_page(paddr);
                __invalidate_icache_page(paddr);
                set_bit(PG_arch_1, &page->flags);
+               kunmap_atomic((void *)paddr);
        }
 #endif
 }
diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c
new file mode 100644 (file)
index 0000000..17a8c0d
--- /dev/null
@@ -0,0 +1,72 @@
+/*
+ * High memory support for Xtensa architecture
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License.  See the file "COPYING" in the main directory of
+ * this archive for more details.
+ *
+ * Copyright (C) 2014 Cadence Design Systems Inc.
+ */
+
+#include <linux/export.h>
+#include <linux/highmem.h>
+#include <asm/tlbflush.h>
+
+static pte_t *kmap_pte;
+
+void *kmap_atomic(struct page *page)
+{
+       enum fixed_addresses idx;
+       unsigned long vaddr;
+       int type;
+
+       pagefault_disable();
+       if (!PageHighMem(page))
+               return page_address(page);
+
+       type = kmap_atomic_idx_push();
+       idx = type + KM_TYPE_NR * smp_processor_id();
+       vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+#ifdef CONFIG_DEBUG_HIGHMEM
+       BUG_ON(!pte_none(*(kmap_pte - idx)));
+#endif
+       set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC));
+
+       return (void *)vaddr;
+}
+EXPORT_SYMBOL(kmap_atomic);
+
+void __kunmap_atomic(void *kvaddr)
+{
+       int idx, type;
+
+       if (kvaddr >= (void *)FIXADDR_START &&
+           kvaddr < (void *)FIXADDR_TOP) {
+               type = kmap_atomic_idx();
+               idx = type + KM_TYPE_NR * smp_processor_id();
+
+               /*
+                * Force other mappings to Oops if they'll try to access this
+                * pte without first remapping it.  Keeping stale mappings
+                * around is also a bad idea, in case the page changes cacheability
+                * attributes or becomes a protected page in a hypervisor.
+                */
+               pte_clear(&init_mm, kvaddr, kmap_pte - idx);
+               local_flush_tlb_kernel_range((unsigned long)kvaddr,
+                                            (unsigned long)kvaddr + PAGE_SIZE);
+
+               kmap_atomic_idx_pop();
+       }
+
+       pagefault_enable();
+}
+EXPORT_SYMBOL(__kunmap_atomic);
+
+void __init kmap_init(void)
+{
+       unsigned long kmap_vstart;
+
+       /* cache the first kmap pte */
+       kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+       kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
+}
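For completeness, a sketch of how the atomic interface above is typically consumed; copy_from_high_page() is illustrative, while kmap_atomic() and the generic kunmap_atomic() wrapper around __kunmap_atomic() are the real entry points:

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: copy out of a page that may be in high memory. */
static void copy_from_high_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page);	/* per-CPU fixmap slot, no sleeping allowed */

	memcpy(dst, src, len);
	kunmap_atomic(src);		/* undoes the mapping; see __kunmap_atomic() above */
}
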
index aff108df92d3a301e8ba0ccaf7e13a5fb26c9038..4224256bb215f17c52d91662f186ecb250dee361 100644 (file)
@@ -8,6 +8,7 @@
  * for more details.
  *
  * Copyright (C) 2001 - 2005 Tensilica Inc.
+ * Copyright (C) 2014 Cadence Design Systems Inc.
  *
  * Chris Zankel        <chris@zankel.net>
  * Joe Taylor  <joe@tensilica.com, joetylr@yahoo.com>
@@ -19,6 +20,7 @@
 #include <linux/errno.h>
 #include <linux/bootmem.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 #include <linux/swap.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
 #include <asm/bootparam.h>
 #include <asm/page.h>
 #include <asm/sections.h>
+#include <asm/sysmem.h>
+
+struct sysmem_info sysmem __initdata;
+
+static void __init sysmem_dump(void)
+{
+       unsigned i;
+
+       pr_debug("Sysmem:\n");
+       for (i = 0; i < sysmem.nr_banks; ++i)
+               pr_debug("  0x%08lx - 0x%08lx (%ldK)\n",
+                        sysmem.bank[i].start, sysmem.bank[i].end,
+                        (sysmem.bank[i].end - sysmem.bank[i].start) >> 10);
+}
+
+/*
+ * Find bank with maximal .start such that bank.start <= start
+ */
+static inline struct meminfo * __init find_bank(unsigned long start)
+{
+       unsigned i;
+       struct meminfo *it = NULL;
+
+       for (i = 0; i < sysmem.nr_banks; ++i)
+               if (sysmem.bank[i].start <= start)
+                       it = sysmem.bank + i;
+               else
+                       break;
+       return it;
+}
+
+/*
+ * Move all memory banks starting at 'from' to a new place at 'to',
+ * adjust nr_banks accordingly.
+ * Both 'from' and 'to' must be inside the sysmem.bank.
+ *
+ * Returns: 0 (success), -ENOMEM (not enough space in the sysmem.bank).
+ */
+static int __init move_banks(struct meminfo *to, struct meminfo *from)
+{
+       unsigned n = sysmem.nr_banks - (from - sysmem.bank);
+
+       if (to > from && to - from + sysmem.nr_banks > SYSMEM_BANKS_MAX)
+               return -ENOMEM;
+       if (to != from)
+               memmove(to, from, n * sizeof(struct meminfo));
+       sysmem.nr_banks += to - from;
+       return 0;
+}
+
+/*
+ * Add a new bank to sysmem. The resulting sysmem is the union of the bytes
+ * of the original sysmem and the new bank.
+ *
+ * Returns: 0 (success), < 0 (error)
+ */
+int __init add_sysmem_bank(unsigned long start, unsigned long end)
+{
+       unsigned i;
+       struct meminfo *it = NULL;
+       unsigned long sz;
+       unsigned long bank_sz = 0;
+
+       if (start == end ||
+           (start < end) != (PAGE_ALIGN(start) < (end & PAGE_MASK))) {
+               pr_warn("Ignoring small memory bank 0x%08lx size: %ld bytes\n",
+                       start, end - start);
+               return -EINVAL;
+       }
+
+       start = PAGE_ALIGN(start);
+       end &= PAGE_MASK;
+       sz = end - start;
+
+       it = find_bank(start);
+
+       if (it)
+               bank_sz = it->end - it->start;
+
+       if (it && bank_sz >= start - it->start) {
+               if (end - it->start > bank_sz)
+                       it->end = end;
+               else
+                       return 0;
+       } else {
+               if (!it)
+                       it = sysmem.bank;
+               else
+                       ++it;
+
+               if (it - sysmem.bank < sysmem.nr_banks &&
+                   it->start - start <= sz) {
+                       it->start = start;
+                       if (it->end - it->start < sz)
+                               it->end = end;
+                       else
+                               return 0;
+               } else {
+                       if (move_banks(it + 1, it) < 0) {
+                               pr_warn("Ignoring memory bank 0x%08lx size %ld bytes\n",
+                                       start, end - start);
+                               return -EINVAL;
+                       }
+                       it->start = start;
+                       it->end = end;
+                       return 0;
+               }
+       }
+       sz = it->end - it->start;
+       for (i = it + 1 - sysmem.bank; i < sysmem.nr_banks; ++i)
+               if (sysmem.bank[i].start - it->start <= sz) {
+                       if (sz < sysmem.bank[i].end - it->start)
+                               it->end = sysmem.bank[i].end;
+               } else {
+                       break;
+               }
+
+       move_banks(it + 1, sysmem.bank + i);
+       return 0;
+}
 
 /*
  * mem_reserve(start, end, must_exist)
  *
  * Reserve some memory from the memory pool.
+ * If must_exist is set and a part of the region being reserved does not
+ * exist, the memory map is not altered.
  *
  * Parameters:
  *  start      Start of region,
  *  must_exist Must exist in memory pool.
  *
  * Returns:
- *  0 (memory area couldn't be mapped)
- * -1 (success)
+ *  0 (success)
+ *  < 0 (error)
  */
 
 int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
 {
-       int i;
-
-       if (start == end)
-               return 0;
+       struct meminfo *it;
+       struct meminfo *rm = NULL;
+       unsigned long sz;
+       unsigned long bank_sz = 0;
 
        start = start & PAGE_MASK;
        end = PAGE_ALIGN(end);
+       sz = end - start;
+       if (!sz)
+               return -EINVAL;
 
-       for (i = 0; i < sysmem.nr_banks; i++)
-               if (start < sysmem.bank[i].end
-                   && end >= sysmem.bank[i].start)
-                       break;
+       it = find_bank(start);
+
+       if (it)
+               bank_sz = it->end - it->start;
 
-       if (i == sysmem.nr_banks) {
-               if (must_exist)
-                       printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
-                               "not in any region!\n", start, end);
-               return 0;
+       if ((!it || end - it->start > bank_sz) && must_exist) {
+               pr_warn("mem_reserve: [0x%0lx, 0x%0lx) not in any region!\n",
+                       start, end);
+               return -EINVAL;
        }
 
-       if (start > sysmem.bank[i].start) {
-               if (end < sysmem.bank[i].end) {
-                       /* split entry */
-                       if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
-                               panic("meminfo overflow\n");
-                       sysmem.bank[sysmem.nr_banks].start = end;
-                       sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
-                       sysmem.nr_banks++;
+       if (it && start - it->start < bank_sz) {
+               if (start == it->start) {
+                       if (end - it->start < bank_sz) {
+                               it->start = end;
+                               return 0;
+                       } else {
+                               rm = it;
+                       }
+               } else {
+                       it->end = start;
+                       if (end - it->start < bank_sz)
+                               return add_sysmem_bank(end,
+                                                      it->start + bank_sz);
+                       ++it;
                }
-               sysmem.bank[i].end = start;
+       }
 
-       } else if (end < sysmem.bank[i].end) {
-               sysmem.bank[i].start = end;
+       if (!it)
+               it = sysmem.bank;
 
-       } else {
-               /* remove entry */
-               sysmem.nr_banks--;
-               sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
-               sysmem.bank[i].end   = sysmem.bank[sysmem.nr_banks].end;
+       for (; it < sysmem.bank + sysmem.nr_banks; ++it) {
+               if (it->end - start <= sz) {
+                       if (!rm)
+                               rm = it;
+               } else {
+                       if (it->start - start < sz)
+                               it->start = end;
+                       break;
+               }
        }
-       return -1;
+
+       if (rm)
+               move_banks(rm, it);
+
+       return 0;
 }
 
 
@@ -99,6 +239,7 @@ void __init bootmem_init(void)
        unsigned long bootmap_start, bootmap_size;
        int i;
 
+       sysmem_dump();
        max_low_pfn = max_pfn = 0;
        min_low_pfn = ~0;
 
@@ -156,19 +297,13 @@ void __init bootmem_init(void)
 
 void __init zones_init(void)
 {
-       unsigned long zones_size[MAX_NR_ZONES];
-       int i;
-
        /* All pages are DMA-able, so we put them all in the DMA zone. */
-
-       zones_size[ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET;
-       for (i = 1; i < MAX_NR_ZONES; i++)
-               zones_size[i] = 0;
-
+       unsigned long zones_size[MAX_NR_ZONES] = {
+               [ZONE_DMA] = max_low_pfn - ARCH_PFN_OFFSET,
 #ifdef CONFIG_HIGHMEM
-       zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
+               [ZONE_HIGHMEM] = max_pfn - max_low_pfn,
 #endif
-
+       };
        free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
@@ -178,16 +313,38 @@ void __init zones_init(void)
 
 void __init mem_init(void)
 {
-       max_mapnr = max_low_pfn - ARCH_PFN_OFFSET;
-       high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
-
 #ifdef CONFIG_HIGHMEM
-#error HIGHGMEM not implemented in init.c
+       unsigned long tmp;
+
+       reset_all_zones_managed_pages();
+       for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
+               free_highmem_page(pfn_to_page(tmp));
 #endif
 
+       max_mapnr = max_pfn - ARCH_PFN_OFFSET;
+       high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
+
        free_all_bootmem();
 
        mem_init_print_info(NULL);
+       pr_info("virtual kernel memory layout:\n"
+#ifdef CONFIG_HIGHMEM
+               "    pkmap   : 0x%08lx - 0x%08lx  (%5lu kB)\n"
+               "    fixmap  : 0x%08lx - 0x%08lx  (%5lu kB)\n"
+#endif
+               "    vmalloc : 0x%08x - 0x%08x  (%5u MB)\n"
+               "    lowmem  : 0x%08x - 0x%08lx  (%5lu MB)\n",
+#ifdef CONFIG_HIGHMEM
+               PKMAP_BASE, PKMAP_BASE + LAST_PKMAP * PAGE_SIZE,
+               (LAST_PKMAP*PAGE_SIZE) >> 10,
+               FIXADDR_START, FIXADDR_TOP,
+               (FIXADDR_TOP - FIXADDR_START) >> 10,
+#endif
+               VMALLOC_START, VMALLOC_END,
+               (VMALLOC_END - VMALLOC_START) >> 20,
+               PAGE_OFFSET, PAGE_OFFSET +
+               (max_low_pfn - min_low_pfn) * PAGE_SIZE,
+               ((max_low_pfn - min_low_pfn) * PAGE_SIZE) >> 20);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -204,3 +361,53 @@ void free_initmem(void)
 {
        free_initmem_default(-1);
 }
+
+static void __init parse_memmap_one(char *p)
+{
+       char *oldp;
+       unsigned long start_at, mem_size;
+
+       if (!p)
+               return;
+
+       oldp = p;
+       mem_size = memparse(p, &p);
+       if (p == oldp)
+               return;
+
+       switch (*p) {
+       case '@':
+               start_at = memparse(p + 1, &p);
+               add_sysmem_bank(start_at, start_at + mem_size);
+               break;
+
+       case '$':
+               start_at = memparse(p + 1, &p);
+               mem_reserve(start_at, start_at + mem_size, 0);
+               break;
+
+       case 0:
+               mem_reserve(mem_size, 0, 0);
+               break;
+
+       default:
+               pr_warn("Unrecognized memmap syntax: %s\n", p);
+               break;
+       }
+}
+
+static int __init parse_memmap_opt(char *str)
+{
+       while (str) {
+               char *k = strchr(str, ',');
+
+               if (k)
+                       *k++ = 0;
+
+               parse_memmap_one(str);
+               str = k;
+       }
+
+       return 0;
+}
+early_param("memmap", parse_memmap_opt);
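Based on the switch in parse_memmap_one() above, the new option accepts comma-separated regions in two main forms (a bare size falls through to mem_reserve() and is not shown here); for example:

    memmap=96M@0x00000000        add a 96 MB bank starting at physical address 0
    memmap=1M$0x07f00000         reserve 1 MB starting at 0x07f00000
    memmap=96M@0,1M$0x07f00000   both at once, chained with a comma
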
index 861203e958da828deb140122752e95b47ddbf35f..3429b483d9f85cd2495e01c8c0a11d05bc22e16c 100644 (file)
@@ -3,6 +3,7 @@
  *
  * Extracted from init.c
  */
+#include <linux/bootmem.h>
 #include <linux/percpu.h>
 #include <linux/init.h>
 #include <linux/string.h>
 #include <asm/initialize_mmu.h>
 #include <asm/io.h>
 
+#if defined(CONFIG_HIGHMEM)
+static void * __init init_pmd(unsigned long vaddr)
+{
+       pgd_t *pgd = pgd_offset_k(vaddr);
+       pmd_t *pmd = pmd_offset(pgd, vaddr);
+
+       if (pmd_none(*pmd)) {
+               unsigned i;
+               pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE);
+
+               for (i = 0; i < 1024; i++)
+                       pte_clear(NULL, 0, pte + i);
+
+               set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK));
+               BUG_ON(pte != pte_offset_kernel(pmd, 0));
+               pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n",
+                        __func__, vaddr, pmd, pte);
+               return pte;
+       } else {
+               return pte_offset_kernel(pmd, 0);
+       }
+}
+
+static void __init fixedrange_init(void)
+{
+       BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE);
+       init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK);
+}
+#endif
+
 void __init paging_init(void)
 {
        memset(swapper_pg_dir, 0, PAGE_SIZE);
+#ifdef CONFIG_HIGHMEM
+       fixedrange_init();
+       pkmap_page_table = init_pmd(PKMAP_BASE);
+       kmap_init();
+#endif
 }
 
 /*
index ade623826788b387f150cfae5a90821f07f6b028..5ece856c5725c7cc72d0a0175bf9229330fabec5 100644 (file)
@@ -149,6 +149,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
        local_irq_restore(flags);
 }
 
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+       if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
+           end - start < _TLB_ENTRIES << PAGE_SHIFT) {
+               start &= PAGE_MASK;
+               while (start < end) {
+                       invalidate_itlb_mapping(start);
+                       invalidate_dtlb_mapping(start);
+                       start += PAGE_SIZE;
+               }
+       } else {
+               local_flush_tlb_all();
+       }
+}
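As a rough worked example (assuming, purely hypothetically, _TLB_ENTRIES == 64 and a 4 KiB PAGE_SIZE), the per-page path above would be taken for kernel ranges smaller than 64 << 12 = 256 KiB that lie entirely between TASK_SIZE and PAGE_OFFSET; anything larger, or outside that window, falls back to local_flush_tlb_all().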
+
 #ifdef CONFIG_DEBUG_TLB_SANITY
 
 static unsigned get_pte_for_vaddr(unsigned vaddr)
index d2369b799c5077f7b9240135cba96d74acfcc7cf..b3e89291cfbafcb35a1eb07f7f584c35ef7f2d81 100644 (file)
@@ -4,6 +4,7 @@
 # "prom monitor" library routines under Linux.
 #
 
-obj-y                  = console.o setup.o
+obj-y                  = setup.o
+obj-$(CONFIG_TTY)      += console.o
 obj-$(CONFIG_NET)      += network.o
 obj-$(CONFIG_BLK_DEV_SIMDISK) += simdisk.o
index f9bc8796629089a540c892109a46a9fe19f68568..b90555cb80890135fee12f7cc3ec361127106836 100644 (file)
@@ -92,18 +92,8 @@ void __init platform_setup(char** cmdline)
 
 /* early initialization */
 
-extern sysmem_info_t __initdata sysmem;
-
-void platform_init(bp_tag_t* first)
+void __init platform_init(bp_tag_t *first)
 {
-       /* Set default memory block if not provided by the bootloader. */
-
-       if (sysmem.nr_banks == 0) {
-               sysmem.nr_banks = 1;
-               sysmem.bank[0].start = PLATFORM_DEFAULT_MEM_START;
-               sysmem.bank[0].end = PLATFORM_DEFAULT_MEM_START
-                                    + PLATFORM_DEFAULT_MEM_SIZE;
-       }
 }
 
 /* Heartbeat. Let the LED blink. */
index c29c2c3ec0ad8ffc2c6427393593dbf147899d1b..b06f5f55ada952ced85de9c845dfb49cac421633 100644 (file)
@@ -170,6 +170,9 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
        acpi_status status;
        int ret;
 
+       if (pr->apic_id == -1)
+               return -ENODEV;
+
        status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
        if (ACPI_FAILURE(status) || !(sta & ACPI_STA_DEVICE_PRESENT))
                return -ENODEV;
@@ -260,10 +263,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
        }
 
        apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
-       if (apic_id < 0) {
+       if (apic_id < 0)
                acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
-               return -ENODEV;
-       }
        pr->apic_id = apic_id;
 
        cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
index d7d32c28829b17834507bf8683f2c2a5c77d0a0d..ad11ba4a412dedc893ae4bdbb9230b3583bbf00b 100644 (file)
@@ -206,13 +206,13 @@ unlock:
        spin_unlock_irqrestore(&ec->lock, flags);
 }
 
-static int acpi_ec_sync_query(struct acpi_ec *ec);
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
 
 static int ec_check_sci_sync(struct acpi_ec *ec, u8 state)
 {
        if (state & ACPI_EC_FLAG_SCI) {
                if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
-                       return acpi_ec_sync_query(ec);
+                       return acpi_ec_sync_query(ec, NULL);
        }
        return 0;
 }
@@ -443,10 +443,8 @@ acpi_handle ec_get_handle(void)
 
 EXPORT_SYMBOL(ec_get_handle);
 
-static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
-
 /*
- * Clears stale _Q events that might have accumulated in the EC.
+ * Process _Q events that might have accumulated in the EC.
  * Run with locked ec mutex.
  */
 static void acpi_ec_clear(struct acpi_ec *ec)
@@ -455,7 +453,7 @@ static void acpi_ec_clear(struct acpi_ec *ec)
        u8 value = 0;
 
        for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
-               status = acpi_ec_query_unlocked(ec, &value);
+               status = acpi_ec_sync_query(ec, &value);
                if (status || !value)
                        break;
        }
@@ -582,13 +580,18 @@ static void acpi_ec_run(void *cxt)
        kfree(handler);
 }
 
-static int acpi_ec_sync_query(struct acpi_ec *ec)
+static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
 {
        u8 value = 0;
        int status;
        struct acpi_ec_query_handler *handler, *copy;
-       if ((status = acpi_ec_query_unlocked(ec, &value)))
+
+       status = acpi_ec_query_unlocked(ec, &value);
+       if (data)
+               *data = value;
+       if (status)
                return status;
+
        list_for_each_entry(handler, &ec->list, node) {
                if (value == handler->query_bit) {
                        /* have custom handler for this bit */
@@ -612,7 +615,7 @@ static void acpi_ec_gpe_query(void *ec_cxt)
        if (!ec)
                return;
        mutex_lock(&ec->mutex);
-       acpi_ec_sync_query(ec);
+       acpi_ec_sync_query(ec, NULL);
        mutex_unlock(&ec->mutex);
 }
 
index 8986b9f22781fa667cf41a37b5889e8ac3be0a36..62ec61e8f84ac90d7c4e433ccc4ead67ff96fbd3 100644 (file)
@@ -52,6 +52,7 @@ static DEFINE_MUTEX(deferred_probe_mutex);
 static LIST_HEAD(deferred_probe_pending_list);
 static LIST_HEAD(deferred_probe_active_list);
 static struct workqueue_struct *deferred_wq;
+static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
 
 /**
  * deferred_probe_work_func() - Retry probing devices in the active list.
@@ -135,6 +136,17 @@ static bool driver_deferred_probe_enable = false;
  * This function moves all devices from the pending list to the active
  * list and schedules the deferred probe workqueue to process them.  It
  * should be called anytime a driver is successfully bound to a device.
+ *
+ * Note, there is a race condition in multi-threaded probe. In the case where
+ * more than one device is probing at the same time, it is possible for one
+ * probe to complete successfully while another is about to defer. If the second
+ * depends on the first, then it will get put on the pending list after the
+ * trigger event has already occurred and will be stuck there.
+ *
+ * The atomic 'deferred_trigger_count' is used to determine if a successful
+ * trigger has occurred in the midst of probing a driver. If the trigger count
+ * changes in the midst of a probe, then deferred processing should be triggered
+ * again.
  */
 static void driver_deferred_probe_trigger(void)
 {
@@ -147,6 +159,7 @@ static void driver_deferred_probe_trigger(void)
         * into the active list so they can be retried by the workqueue
         */
        mutex_lock(&deferred_probe_mutex);
+       atomic_inc(&deferred_trigger_count);
        list_splice_tail_init(&deferred_probe_pending_list,
                              &deferred_probe_active_list);
        mutex_unlock(&deferred_probe_mutex);
@@ -265,6 +278,7 @@ static DECLARE_WAIT_QUEUE_HEAD(probe_waitqueue);
 static int really_probe(struct device *dev, struct device_driver *drv)
 {
        int ret = 0;
+       int local_trigger_count = atomic_read(&deferred_trigger_count);
 
        atomic_inc(&probe_count);
        pr_debug("bus: '%s': %s: probing driver %s with device %s\n",
@@ -310,6 +324,9 @@ probe_failed:
                /* Driver requested deferred probing */
                dev_info(dev, "Driver %s requests probe deferral\n", drv->name);
                driver_deferred_probe_add(dev);
+               /* Did a trigger occur while probing? Need to re-trigger if yes */
+               if (local_trigger_count != atomic_read(&deferred_trigger_count))
+                       driver_deferred_probe_trigger();
        } else if (ret != -ENODEV && ret != -ENXIO) {
                /* driver matched but the probe failed */
                printk(KERN_WARNING
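The trigger-count pattern documented in the comment above can be sketched as a plain user-space analogue with C11 atomics. This is illustrative only, not kernel code, and every name in it (trigger_deferred, probe_once, and so on) is invented for the example:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int trigger_count;

/* A probe succeeded: bump the counter and replay the deferred work. */
static void trigger_deferred(void)
{
	atomic_fetch_add(&trigger_count, 1);
	/* ...splice the pending list onto the active list and kick a worker... */
}

/* One probe attempt; returns true when the caller must retry it later. */
static bool probe_once(bool (*probe)(void))
{
	int snapshot = atomic_load(&trigger_count);

	if (probe()) {
		trigger_deferred();
		return false;
	}

	/*
	 * Deferred. If some other probe succeeded while this one ran, the
	 * trigger it generated was missed, so replay it to avoid stranding
	 * this item on the pending list.
	 */
	if (snapshot != atomic_load(&trigger_count))
		trigger_deferred();
	return true;
}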
index e714709704e4578ccc3703ca30108e596461ad28..5b47210889e038d72f7a172062b3c4ddd2daa07d 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/string.h>
 #include <linux/platform_device.h>
 #include <linux/of_device.h>
+#include <linux/of_irq.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/dma-mapping.h>
@@ -87,7 +88,11 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
                return -ENXIO;
        return dev->archdata.irqs[num];
 #else
-       struct resource *r = platform_get_resource(dev, IORESOURCE_IRQ, num);
+       struct resource *r;
+       if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
+               return of_irq_get(dev->dev.of_node, num);
+
+       r = platform_get_resource(dev, IORESOURCE_IRQ, num);
 
        return r ? r->start : -ENXIO;
 #endif
index 8f5565bf34cda31504e526ccc3d79d4e7fe20fd2..fa9bb742df6e0becfa8bca52576f17b5bdafe2bf 100644 (file)
@@ -3067,7 +3067,10 @@ static int raw_cmd_copyout(int cmd, void __user *param,
        int ret;
 
        while (ptr) {
-               ret = copy_to_user(param, ptr, sizeof(*ptr));
+               struct floppy_raw_cmd cmd = *ptr;
+               cmd.next = NULL;
+               cmd.kernel_data = NULL;
+               ret = copy_to_user(param, &cmd, sizeof(cmd));
                if (ret)
                        return -EFAULT;
                param += sizeof(struct floppy_raw_cmd);
@@ -3121,10 +3124,11 @@ loop:
                return -ENOMEM;
        *rcmd = ptr;
        ret = copy_from_user(ptr, param, sizeof(*ptr));
-       if (ret)
-               return -EFAULT;
        ptr->next = NULL;
        ptr->buffer_length = 0;
+       ptr->kernel_data = NULL;
+       if (ret)
+               return -EFAULT;
        param += sizeof(struct floppy_raw_cmd);
        if (ptr->cmd_count > 33)
                        /* the command may now also take up the space
@@ -3140,7 +3144,6 @@ loop:
        for (i = 0; i < 16; i++)
                ptr->reply[i] = 0;
        ptr->resultcode = 0;
-       ptr->kernel_data = NULL;
 
        if (ptr->flags & (FD_RAW_READ | FD_RAW_WRITE)) {
                if (ptr->length <= 0)
index a535c7bf85745144a99d86ff86fc27c0399ff8ab..422391242b39ceac4a322ee532288ee46b6eba32 100644 (file)
@@ -100,6 +100,8 @@ void __init vexpress_osc_of_setup(struct device_node *node)
        struct clk *clk;
        u32 range[2];
 
+       vexpress_sysreg_of_early_init();
+
        osc = kzalloc(sizeof(*osc), GFP_KERNEL);
        if (!osc)
                return;
index 57e823c44d2ad326eeaaa788fbb67d80ba620c7d..5163ec13429d1e37082b8d32c84684284217322f 100644 (file)
@@ -66,6 +66,7 @@ static int arch_timer_ppi[MAX_TIMER_PPI];
 static struct clock_event_device __percpu *arch_timer_evt;
 
 static bool arch_timer_use_virtual = true;
+static bool arch_timer_c3stop;
 static bool arch_timer_mem_use_virtual;
 
 /*
@@ -263,7 +264,8 @@ static void __arch_timer_setup(unsigned type,
        clk->features = CLOCK_EVT_FEAT_ONESHOT;
 
        if (type == ARCH_CP15_TIMER) {
-               clk->features |= CLOCK_EVT_FEAT_C3STOP;
+               if (arch_timer_c3stop)
+                       clk->features |= CLOCK_EVT_FEAT_C3STOP;
                clk->name = "arch_sys_timer";
                clk->rating = 450;
                clk->cpumask = cpumask_of(smp_processor_id());
@@ -665,6 +667,8 @@ static void __init arch_timer_init(struct device_node *np)
                }
        }
 
+       arch_timer_c3stop = !of_property_read_bool(np, "always-on");
+
        arch_timer_register();
        arch_timer_common_init();
 }
index ca81809d159d5ebf49ddcb6f9df0bc38208fefce..7ce442148c3f5dfb32449498bc9352066050688e 100644 (file)
@@ -212,4 +212,9 @@ error_free:
        return ret;
 }
 
-CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_add);
+static void __init zevio_timer_init(struct device_node *node)
+{
+       BUG_ON(zevio_timer_add(node));
+}
+
+CLOCKSOURCE_OF_DECLARE(zevio_timer, "lsi,zevio-timer", zevio_timer_init);
index d00e5d1abd258b469bf48862a5f14b08e04f97a7..5c4369b5d834d93f05095cf6052848ec2d2ddec4 100644 (file)
@@ -242,7 +242,7 @@ static void do_powersaver(int cx_address, unsigned int mults_index,
  * Sets a new clock ratio.
  */
 
-static void longhaul_setstate(struct cpufreq_policy *policy,
+static int longhaul_setstate(struct cpufreq_policy *policy,
                unsigned int table_index)
 {
        unsigned int mults_index;
@@ -258,10 +258,12 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
        /* Safety precautions */
        mult = mults[mults_index & 0x1f];
        if (mult == -1)
-               return;
+               return -EINVAL;
+
        speed = calc_speed(mult);
        if ((speed > highest_speed) || (speed < lowest_speed))
-               return;
+               return -EINVAL;
+
        /* Voltage transition before frequency transition? */
        if (can_scale_voltage && longhaul_index < table_index)
                dir = 1;
@@ -269,8 +271,6 @@ static void longhaul_setstate(struct cpufreq_policy *policy,
        freqs.old = calc_speed(longhaul_get_cpu_mult());
        freqs.new = speed;
 
-       cpufreq_freq_transition_begin(policy, &freqs);
-
        pr_debug("Setting to FSB:%dMHz Mult:%d.%dx (%s)\n",
                        fsb, mult/10, mult%10, print_speed(speed/1000));
 retry_loop:
@@ -385,12 +385,14 @@ retry_loop:
                        goto retry_loop;
                }
        }
-       /* Report true CPU frequency */
-       cpufreq_freq_transition_end(policy, &freqs, 0);
 
-       if (!bm_timeout)
+       if (!bm_timeout) {
                printk(KERN_INFO PFX "Warning: Timeout while waiting for "
                                "idle PCI bus.\n");
+               return -EBUSY;
+       }
+
+       return 0;
 }
 
 /*
@@ -631,9 +633,10 @@ static int longhaul_target(struct cpufreq_policy *policy,
        unsigned int i;
        unsigned int dir = 0;
        u8 vid, current_vid;
+       int retval = 0;
 
        if (!can_scale_voltage)
-               longhaul_setstate(policy, table_index);
+               retval = longhaul_setstate(policy, table_index);
        else {
                /* On test system voltage transitions exceeding single
                 * step up or down were turning motherboard off. Both
@@ -648,7 +651,7 @@ static int longhaul_target(struct cpufreq_policy *policy,
                while (i != table_index) {
                        vid = (longhaul_table[i].driver_data >> 8) & 0x1f;
                        if (vid != current_vid) {
-                               longhaul_setstate(policy, i);
+                               retval = longhaul_setstate(policy, i);
                                current_vid = vid;
                                msleep(200);
                        }
@@ -657,10 +660,11 @@ static int longhaul_target(struct cpufreq_policy *policy,
                        else
                                i--;
                }
-               longhaul_setstate(policy, table_index);
+               retval = longhaul_setstate(policy, table_index);
        }
+
        longhaul_index = table_index;
-       return 0;
+       return retval;
 }
 
 
@@ -968,7 +972,15 @@ static void __exit longhaul_exit(void)
 
        for (i = 0; i < numscales; i++) {
                if (mults[i] == maxmult) {
+                       struct cpufreq_freqs freqs;
+
+                       freqs.old = policy->cur;
+                       freqs.new = longhaul_table[i].frequency;
+                       freqs.flags = 0;
+
+                       cpufreq_freq_transition_begin(policy, &freqs);
                        longhaul_setstate(policy, i);
+                       cpufreq_freq_transition_end(policy, &freqs, 0);
                        break;
                }
        }
index 49f120e1bc7be0ecb879f184424d1dc56ee63981..78904e6ca4a020d53a60f4139711441a069ad447 100644 (file)
@@ -138,22 +138,14 @@ static void powernow_k6_set_cpu_multiplier(unsigned int best_i)
 static int powernow_k6_target(struct cpufreq_policy *policy,
                unsigned int best_i)
 {
-       struct cpufreq_freqs freqs;
 
        if (clock_ratio[best_i].driver_data > max_multiplier) {
                printk(KERN_ERR PFX "invalid target frequency\n");
                return -EINVAL;
        }
 
-       freqs.old = busfreq * powernow_k6_get_cpu_multiplier();
-       freqs.new = busfreq * clock_ratio[best_i].driver_data;
-
-       cpufreq_freq_transition_begin(policy, &freqs);
-
        powernow_k6_set_cpu_multiplier(best_i);
 
-       cpufreq_freq_transition_end(policy, &freqs, 0);
-
        return 0;
 }
 
@@ -227,9 +219,20 @@ have_busfreq:
 static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
 {
        unsigned int i;
-       for (i = 0; i < 8; i++) {
-               if (i == max_multiplier)
+
+       for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+               if (clock_ratio[i].driver_data == max_multiplier) {
+                       struct cpufreq_freqs freqs;
+
+                       freqs.old = policy->cur;
+                       freqs.new = clock_ratio[i].frequency;
+                       freqs.flags = 0;
+
+                       cpufreq_freq_transition_begin(policy, &freqs);
                        powernow_k6_target(policy, i);
+                       cpufreq_freq_transition_end(policy, &freqs, 0);
+                       break;
+               }
        }
        return 0;
 }
index f911645c3f6db59e18f32b68c06bfc890283c029..e61e224475ad457fd71b6934b2a5256a2d96fcc0 100644 (file)
@@ -269,8 +269,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
 
        freqs.new = powernow_table[index].frequency;
 
-       cpufreq_freq_transition_begin(policy, &freqs);
-
        /* Now do the magic poking into the MSRs.  */
 
        if (have_a0 == 1)       /* A0 errata 5 */
@@ -290,8 +288,6 @@ static int powernow_target(struct cpufreq_policy *policy, unsigned int index)
        if (have_a0 == 1)
                local_irq_enable();
 
-       cpufreq_freq_transition_end(policy, &freqs, 0);
-
        return 0;
 }
 
index a1ca3dd04a8e7b616acf50467cbbf142f8325035..0af618abebafa4b44b323d1811c1f885a52e0beb 100644 (file)
@@ -138,6 +138,7 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
        struct cpufreq_frequency_table *table;
        struct cpu_data *data;
        unsigned int cpu = policy->cpu;
+       u64 transition_latency_hz;
 
        np = of_get_cpu_node(cpu, NULL);
        if (!np)
@@ -205,8 +206,10 @@ static int corenet_cpufreq_cpu_init(struct cpufreq_policy *policy)
        for_each_cpu(i, per_cpu(cpu_mask, cpu))
                per_cpu(cpu_data, i) = data;
 
+       transition_latency_hz = 12ULL * NSEC_PER_SEC;
        policy->cpuinfo.transition_latency =
-                               (12ULL * NSEC_PER_SEC) / fsl_get_sys_freq();
+               do_div(transition_latency_hz, fsl_get_sys_freq());
+
        of_node_put(np);
 
        return 0;
index e930d4fe29c71c2d82d2317bb6681ac8d3f39d6a..1ef5ab9c9d519d175b202dbf01cba243a870e1f7 100644 (file)
@@ -145,6 +145,7 @@ exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 
        plane->crtc = crtc;
        plane->fb = crtc->primary->fb;
+       drm_framebuffer_reference(plane->fb);
 
        return 0;
 }
index c786cd4f457bb8893fc4f02e8e27338832b1bc58..2a3ad24276f8380415940d00b87708f9cbe0d9e2 100644 (file)
@@ -263,7 +263,7 @@ struct drm_gem_object *exynos_dmabuf_prime_import(struct drm_device *drm_dev,
        buffer->sgt = sgt;
        exynos_gem_obj->base.import_attach = attach;
 
-       DRM_DEBUG_PRIME("dma_addr = 0x%x, size = 0x%lx\n", buffer->dma_addr,
+       DRM_DEBUG_PRIME("dma_addr = %pad, size = 0x%lx\n", &buffer->dma_addr,
                                                                buffer->size);
 
        return &exynos_gem_obj->base;
index eb73e3bf2a0cbe6e56f9ae6b5d81b188038f05e3..4ac438187568ed4a6894436433e404c94b90a7a2 100644 (file)
@@ -1426,9 +1426,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dsi->reg_base = devm_ioremap_resource(&pdev->dev, res);
-       if (!dsi->reg_base) {
+       if (IS_ERR(dsi->reg_base)) {
                dev_err(&pdev->dev, "failed to remap io region\n");
-               return -EADDRNOTAVAIL;
+               return PTR_ERR(dsi->reg_base);
        }
 
        dsi->phy = devm_phy_get(&pdev->dev, "dsim");
index 7afead9c3f30258b7869e69e0f2015d9c2921fca..852f2dadaebdbbe3a385b5fe28cd65a560f0c210 100644 (file)
@@ -220,7 +220,7 @@ static void vidi_win_commit(struct exynos_drm_manager *mgr, int zpos)
 
        win_data->enabled = true;
 
-       DRM_DEBUG_KMS("dma_addr = 0x%x\n", win_data->dma_addr);
+       DRM_DEBUG_KMS("dma_addr = %pad\n", &win_data->dma_addr);
 
        if (ctx->vblank_on)
                schedule_work(&ctx->work);
index ab5e93c30aa2bde43b1ed892f0437b7b5d0c8289..62a5c3627b90eaea4e6620cb829af572781e3d94 100644 (file)
@@ -50,7 +50,7 @@ bool intel_enable_ppgtt(struct drm_device *dev, bool full)
 
        /* Full ppgtt disabled by default for now due to issues. */
        if (full)
-               return false; /* HAS_PPGTT(dev) */
+               return HAS_PPGTT(dev) && (i915.enable_ppgtt == 2);
        else
                return HAS_ALIASING_PPGTT(dev);
 }
index 7753249b3a959cce7f31b8c9cf1ba0b36c18dce8..f98ba4e6e70b940c150c504782fc943abee1544a 100644 (file)
@@ -1362,10 +1362,20 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
        spin_lock(&dev_priv->irq_lock);
        for (i = 1; i < HPD_NUM_PINS; i++) {
 
-               WARN_ONCE(hpd[i] & hotplug_trigger &&
-                         dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED,
-                         "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
-                         hotplug_trigger, i, hpd[i]);
+               if (hpd[i] & hotplug_trigger &&
+                   dev_priv->hpd_stats[i].hpd_mark == HPD_DISABLED) {
+                       /*
+                        * On GMCH platforms the interrupt mask bits only
+                        * prevent irq generation, not the setting of the
+                        * hotplug bits itself. So only WARN about unexpected
+                        * interrupts on saner platforms.
+                        */
+                       WARN_ONCE(INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev),
+                                 "Received HPD interrupt (0x%08x) on pin %d (0x%08x) although disabled\n",
+                                 hotplug_trigger, i, hpd[i]);
+
+                       continue;
+               }
 
                if (!(hpd[i] & hotplug_trigger) ||
                    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
index 9f5b18d9d8850e886eeb44ca68b0bd17af4c3f30..c77af69c2d8f5f8c97f5dc2045fd99a07960617b 100644 (file)
@@ -827,6 +827,7 @@ enum punit_power_well {
 # define MI_FLUSH_ENABLE                               (1 << 12)
 # define ASYNC_FLIP_PERF_DISABLE                       (1 << 14)
 # define MODE_IDLE                                     (1 << 9)
+# define STOP_RING                                     (1 << 8)
 
 #define GEN6_GT_MODE   0x20d0
 #define GEN7_GT_MODE   0x7008
index dae976f51d83357a51637fca61bd830bb9753010..69bcc42a0e44327679217d29a9415e553bd3564b 100644 (file)
@@ -9654,11 +9654,22 @@ intel_pipe_config_compare(struct drm_device *dev,
        PIPE_CONF_CHECK_I(pipe_src_w);
        PIPE_CONF_CHECK_I(pipe_src_h);
 
-       PIPE_CONF_CHECK_I(gmch_pfit.control);
-       /* pfit ratios are autocomputed by the hw on gen4+ */
-       if (INTEL_INFO(dev)->gen < 4)
-               PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
-       PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+       /*
+        * FIXME: BIOS likes to set up a cloned config with lvds+external
+        * screen. Since we don't yet re-compute the pipe config when moving
+        * just the lvds port away to another pipe the sw tracking won't match.
+        *
+        * Proper atomic modesets with recomputed global state will fix this.
+        * Until then just don't check gmch state for inherited modes.
+        */
+       if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_INHERITED_MODE)) {
+               PIPE_CONF_CHECK_I(gmch_pfit.control);
+               /* pfit ratios are autocomputed by the hw on gen4+ */
+               if (INTEL_INFO(dev)->gen < 4)
+                       PIPE_CONF_CHECK_I(gmch_pfit.pgm_ratios);
+               PIPE_CONF_CHECK_I(gmch_pfit.lvds_border_bits);
+       }
+
        PIPE_CONF_CHECK_I(pch_pfit.enabled);
        if (current_config->pch_pfit.enabled) {
                PIPE_CONF_CHECK_I(pch_pfit.pos);
@@ -11616,6 +11627,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
                            base.head) {
                memset(&crtc->config, 0, sizeof(crtc->config));
 
+               crtc->config.quirks |= PIPE_CONFIG_QUIRK_INHERITED_MODE;
+
                crtc->active = dev_priv->display.get_pipe_config(crtc,
                                                                 &crtc->config);
 
index d2a55884ad523b8a54546bfbbe0565889e73261e..dfa85289f28f301fe259b522b45ef1521489175e 100644 (file)
@@ -3619,7 +3619,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
 {
        struct drm_connector *connector = &intel_connector->base;
        struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
-       struct drm_device *dev = intel_dig_port->base.base.dev;
+       struct intel_encoder *intel_encoder = &intel_dig_port->base;
+       struct drm_device *dev = intel_encoder->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_display_mode *fixed_mode = NULL;
        bool has_dpcd;
@@ -3629,6 +3630,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        if (!is_edp(intel_dp))
                return true;
 
+       /* The VDD bit needs a power domain reference, so if the bit is already
+        * enabled when we boot, grab this reference. */
+       if (edp_have_panel_vdd(intel_dp)) {
+               enum intel_display_power_domain power_domain;
+               power_domain = intel_display_port_power_domain(intel_encoder);
+               intel_display_power_get(dev_priv, power_domain);
+       }
+
        /* Cache DPCD and EDID for edp. */
        intel_edp_panel_vdd_on(intel_dp);
        has_dpcd = intel_dp_get_dpcd(intel_dp);
index 0542de98226018a9427519d0eb6996714f3c0582..328b1a70264b4c12a07400de7284b2968813aa94 100644 (file)
@@ -236,7 +236,8 @@ struct intel_crtc_config {
         * tracked with quirk flags so that fastboot and state checker can act
         * accordingly.
         */
-#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS      (1<<0) /* unreliable sync mode.flags */
+#define PIPE_CONFIG_QUIRK_INHERITED_MODE       (1<<1) /* mode inherited from firmware */
        unsigned long quirks;
 
        /* User requested mode, only valid as a starting point to
index b4d44e62f0c769746a538f70afdf9916c95f6bd8..fce4a0d93c0b19b7d51f4d2578331b13aef51399 100644 (file)
@@ -132,6 +132,16 @@ static int intelfb_create(struct drm_fb_helper *helper,
 
        mutex_lock(&dev->struct_mutex);
 
+       if (intel_fb &&
+           (sizes->fb_width > intel_fb->base.width ||
+            sizes->fb_height > intel_fb->base.height)) {
+               DRM_DEBUG_KMS("BIOS fb too small (%dx%d), we require (%dx%d),"
+                             " releasing it\n",
+                             intel_fb->base.width, intel_fb->base.height,
+                             sizes->fb_width, sizes->fb_height);
+               drm_framebuffer_unreference(&intel_fb->base);
+               intel_fb = ifbdev->fb = NULL;
+       }
        if (!intel_fb || WARN_ON(!intel_fb->obj)) {
                DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
                ret = intelfb_alloc(helper, sizes);
index b0413e190625b26c0552e8a737479f52b3dd5c9d..157267aa356165b7b7fcd94c3318a79f04f2bb8c 100644 (file)
@@ -821,11 +821,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
        }
 }
 
-static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+static int hdmi_portclock_limit(struct intel_hdmi *hdmi, bool respect_dvi_limit)
 {
        struct drm_device *dev = intel_hdmi_to_dev(hdmi);
 
-       if (!hdmi->has_hdmi_sink || IS_G4X(dev))
+       if ((respect_dvi_limit && !hdmi->has_hdmi_sink) || IS_G4X(dev))
                return 165000;
        else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
                return 300000;
@@ -837,7 +837,8 @@ static enum drm_mode_status
 intel_hdmi_mode_valid(struct drm_connector *connector,
                      struct drm_display_mode *mode)
 {
-       if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
+       if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector),
+                                              true))
                return MODE_CLOCK_HIGH;
        if (mode->clock < 20000)
                return MODE_CLOCK_LOW;
@@ -879,7 +880,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
        struct drm_device *dev = encoder->base.dev;
        struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
        int clock_12bpc = pipe_config->adjusted_mode.crtc_clock * 3 / 2;
-       int portclock_limit = hdmi_portclock_limit(intel_hdmi);
+       int portclock_limit = hdmi_portclock_limit(intel_hdmi, false);
        int desired_bpp;
 
        if (intel_hdmi->color_range_auto) {
index 6bc68bdcf433cf06a68d52d95b063f9a42795194..79fb4cc2137c19d2cf4ad5dc23e6c5883eb58e4d 100644 (file)
@@ -437,32 +437,41 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
        I915_WRITE(HWS_PGA, addr);
 }
 
-static int init_ring_common(struct intel_ring_buffer *ring)
+static bool stop_ring(struct intel_ring_buffer *ring)
 {
-       struct drm_device *dev = ring->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj = ring->obj;
-       int ret = 0;
-       u32 head;
+       struct drm_i915_private *dev_priv = to_i915(ring->dev);
 
-       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+       if (!IS_GEN2(ring->dev)) {
+               I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING));
+               if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) {
+                       DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+                       return false;
+               }
+       }
 
-       /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
        ring->write_tail(ring, 0);
-       if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
-               DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
 
-       if (I915_NEED_GFX_HWS(dev))
-               intel_ring_setup_status_page(ring);
-       else
-               ring_setup_phys_status_page(ring);
+       if (!IS_GEN2(ring->dev)) {
+               (void)I915_READ_CTL(ring);
+               I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING));
+       }
 
-       head = I915_READ_HEAD(ring) & HEAD_ADDR;
+       return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0;
+}
 
-       /* G45 ring initialization fails to reset head to zero */
-       if (head != 0) {
+static int init_ring_common(struct intel_ring_buffer *ring)
+{
+       struct drm_device *dev = ring->dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_gem_object *obj = ring->obj;
+       int ret = 0;
+
+       gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
+
+       if (!stop_ring(ring)) {
+               /* G45 ring initialization often fails to reset head to zero */
                DRM_DEBUG_KMS("%s head not reset to zero "
                              "ctl %08x head %08x tail %08x start %08x\n",
                              ring->name,
@@ -471,9 +480,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                              I915_READ_TAIL(ring),
                              I915_READ_START(ring));
 
-               I915_WRITE_HEAD(ring, 0);
-
-               if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+               if (!stop_ring(ring)) {
                        DRM_ERROR("failed to set %s head to zero "
                                  "ctl %08x head %08x tail %08x start %08x\n",
                                  ring->name,
@@ -481,9 +488,16 @@ static int init_ring_common(struct intel_ring_buffer *ring)
                                  I915_READ_HEAD(ring),
                                  I915_READ_TAIL(ring),
                                  I915_READ_START(ring));
+                       ret = -EIO;
+                       goto out;
                }
        }
 
+       if (I915_NEED_GFX_HWS(dev))
+               intel_ring_setup_status_page(ring);
+       else
+               ring_setup_phys_status_page(ring);
+
        /* Initialize the ring. This must happen _after_ we've cleared the ring
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
index 270a6a9734387b6079e9348e17773214a293ad0a..2b91c4b4d34b2efd6029bb52f46ece14eb2e515f 100644 (file)
@@ -34,6 +34,7 @@ struct  intel_hw_status_page {
 #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)
 
 #define I915_READ_MODE(ring) I915_READ(RING_MI_MODE((ring)->mmio_base))
+#define I915_WRITE_MODE(ring, val) I915_WRITE(RING_MI_MODE((ring)->mmio_base), val)
 
 enum intel_ring_hangcheck_action {
        HANGCHECK_IDLE = 0,
index 3e6c0f3ed592a6b746610b9b6dd1986e50e1ace7..ef9957dbac943bdda6a1e6fd9bca142ebb202cef 100644 (file)
@@ -510,9 +510,8 @@ static void update_cursor(struct drm_crtc *crtc)
                                        MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
                } else {
                        /* disable cursor: */
-                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), 0);
-                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
-                                       MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB));
+                       mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+                                       mdp4_kms->blank_cursor_iova);
                }
 
                /* and drop the iova ref + obj rev when done scanning out: */
@@ -574,11 +573,9 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
 
        if (old_bo) {
                /* drop our previous reference: */
-               msm_gem_put_iova(old_bo, mdp4_kms->id);
-               drm_gem_object_unreference_unlocked(old_bo);
+               drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
        }
 
-       crtc_flush(crtc);
        request_pending(crtc, PENDING_CURSOR);
 
        return 0;
index c740ccd1cc67fb4d1a08ef6a65efc671daec8595..8edd531cb62166ad1291be18ffc26ba033cbc71d 100644 (file)
@@ -70,12 +70,12 @@ irqreturn_t mdp4_irq(struct msm_kms *kms)
 
        VERB("status=%08x", status);
 
+       mdp_dispatch_irqs(mdp_kms, status);
+
        for (id = 0; id < priv->num_crtcs; id++)
                if (status & mdp4_crtc_vblank(priv->crtcs[id]))
                        drm_handle_vblank(dev, id);
 
-       mdp_dispatch_irqs(mdp_kms, status);
-
        return IRQ_HANDLED;
 }
 
index 272e707c948704e6ff36cc8df263723fdbbd2a71..0bb4faa17523e0862d7f32df0939976424e82d6a 100644 (file)
@@ -144,6 +144,10 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
 static void mdp4_destroy(struct msm_kms *kms)
 {
        struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+       if (mdp4_kms->blank_cursor_iova)
+               msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+       if (mdp4_kms->blank_cursor_bo)
+               drm_gem_object_unreference(mdp4_kms->blank_cursor_bo);
        kfree(mdp4_kms);
 }
 
@@ -372,6 +376,23 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
                goto fail;
        }
 
+       mutex_lock(&dev->struct_mutex);
+       mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
+       mutex_unlock(&dev->struct_mutex);
+       if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
+               ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
+               dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
+               mdp4_kms->blank_cursor_bo = NULL;
+               goto fail;
+       }
+
+       ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+                       &mdp4_kms->blank_cursor_iova);
+       if (ret) {
+               dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
+               goto fail;
+       }
+
        return kms;
 
 fail:
index 66a4d31aec80e010e5f5914705f05f7f346dc68c..715520c54cdec48f93750da0843213878aaabf86 100644 (file)
@@ -44,6 +44,10 @@ struct mdp4_kms {
        struct clk *lut_clk;
 
        struct mdp_irq error_handler;
+
+       /* empty/blank cursor bo to use when cursor is "disabled" */
+       struct drm_gem_object *blank_cursor_bo;
+       uint32_t blank_cursor_iova;
 };
 #define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
 
index 353d494a497f22c96e0f51d2049cd765ad595f07..f2b985bc2adf41f8330dd4bfb8dcafdb30b43c53 100644 (file)
@@ -71,11 +71,11 @@ static void mdp5_irq_mdp(struct mdp_kms *mdp_kms)
 
        VERB("status=%08x", status);
 
+       mdp_dispatch_irqs(mdp_kms, status);
+
        for (id = 0; id < priv->num_crtcs; id++)
                if (status & mdp5_crtc_vblank(priv->crtcs[id]))
                        drm_handle_vblank(dev, id);
-
-       mdp_dispatch_irqs(mdp_kms, status);
 }
 
 irqreturn_t mdp5_irq(struct msm_kms *kms)
index 6c6d7d4c9b4e77848994222f9bd5bf2b26b6a043..a752ab83b8104124a232d3e6701846c661fabfa3 100644 (file)
@@ -62,11 +62,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
        dma_addr_t paddr;
        int ret, size;
 
-       /* only doing ARGB32 since this is what is needed to alpha-blend
-        * with video overlays:
-        */
        sizes->surface_bpp = 32;
-       sizes->surface_depth = 32;
+       sizes->surface_depth = 24;
 
        DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
                        sizes->surface_height, sizes->surface_bpp,
index 3da8264d3039017bd358ccaca48197356f932f68..bb8026daebc9426759d2bb31f2a6360dfed606a3 100644 (file)
@@ -118,8 +118,10 @@ static void put_pages(struct drm_gem_object *obj)
 
                if (iommu_present(&platform_bus_type))
                        drm_gem_put_pages(obj, msm_obj->pages, true, false);
-               else
+               else {
                        drm_mm_remove_node(msm_obj->vram_node);
+                       drm_free_large(msm_obj->pages);
+               }
 
                msm_obj->pages = NULL;
        }
index 36c717af6cf90830324a3538ac05dfd4306f3ccb..edb871d7d395cbb4af140120953b9e854d72aae7 100644 (file)
@@ -312,7 +312,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
        struct drm_device *drm = crtc->dev;
        struct drm_plane *plane;
 
-       list_for_each_entry(plane, &drm->mode_config.plane_list, head) {
+       drm_for_each_legacy_plane(plane, &drm->mode_config.plane_list) {
                if (plane->crtc == crtc) {
                        tegra_plane_disable(plane);
                        plane->crtc = NULL;
index 931490b9cfed04b1365a87fb2d83c0526e114fdd..87df0b3674fda203c96baef3ff3030a87424a800 100644 (file)
@@ -1214,14 +1214,36 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int ret;
+       SVGA3dCmdSurfaceDMASuffix *suffix;
+       uint32_t bo_size;
 
        cmd = container_of(header, struct vmw_dma_cmd, header);
+       suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->dma +
+                                              header->size - sizeof(*suffix));
+
+       /* Make sure the device and verifier stay in sync. */
+       if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
+               DRM_ERROR("Invalid DMA suffix size.\n");
+               return -EINVAL;
+       }
+
        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->dma.guest.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;
 
+       /* Make sure DMA doesn't cross BO boundaries. */
+       bo_size = vmw_bo->base.num_pages * PAGE_SIZE;
+       if (unlikely(cmd->dma.guest.ptr.offset > bo_size)) {
+               DRM_ERROR("Invalid DMA offset.\n");
+               return -EINVAL;
+       }
+
+       bo_size -= cmd->dma.guest.ptr.offset;
+       if (unlikely(suffix->maximumOffset > bo_size))
+               suffix->maximumOffset = bo_size;
+
        ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                user_surface_converter, &cmd->dma.host.sid,
                                NULL);
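A worked example with hypothetical numbers (none of them from the patch): for a 16-page buffer object with 4 KiB pages, bo_size starts at 64 KiB, so a guest.ptr.offset of 80 KiB is rejected with -EINVAL; with an offset of 16 KiB the remaining bo_size is 48 KiB, and any suffix->maximumOffset above 48 KiB is clamped down to 48 KiB before the command is allowed through.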
index 6d02e3b063756f6225078df7e00f981478ef5f45..d76f0b70c6e09a0dd4bf52d77df7aa85651377c9 100644 (file)
@@ -365,12 +365,12 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
                if (cpu_has_tjmax(c))
                        dev_warn(dev, "Unable to read TjMax from CPU %u\n", id);
        } else {
-               val = (eax >> 16) & 0x7f;
+               val = (eax >> 16) & 0xff;
                /*
                 * If the TjMax is not plausible, an assumption
                 * will be used
                 */
-               if (val >= 85) {
+               if (val) {
                        dev_dbg(dev, "TjMax is %d degrees C\n", val);
                        return val * 1000;
                }
index d4e8983fba537d71b8da25b5b0768f088678722d..23f38cf2c5cd030c2ba9e3aebd0d199c4925e930 100644 (file)
@@ -1,10 +1,10 @@
 config INFINIBAND_CXGB4
-       tristate "Chelsio T4 RDMA Driver"
+       tristate "Chelsio T4/T5 RDMA Driver"
        depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
        select GENERIC_ALLOCATOR
        ---help---
-         This is an iWARP/RDMA driver for the Chelsio T4 1GbE and
-         10GbE adapters.
+         This is an iWARP/RDMA driver for the Chelsio T4 and T5
+         1GbE, 10GbE adapters and T5 40GbE adapter.
 
          For general information about Chelsio and our products, visit
          our website at <http://www.chelsio.com>.
index 185452abf32cf336049e20802759a4352996392b..1f863a96a480fd1ab087989acea029a781c7c23e 100644 (file)
@@ -587,6 +587,10 @@ static int send_connect(struct c4iw_ep *ep)
                opt2 |= SACK_EN(1);
        if (wscale && enable_tcp_window_scaling)
                opt2 |= WND_SCALE_EN(1);
+       if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+               opt2 |= T5_OPT_2_VALID;
+               opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+       }
        t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
 
        if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
@@ -996,7 +1000,7 @@ static void close_complete_upcall(struct c4iw_ep *ep, int status)
 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 {
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-       state_set(&ep->com, ABORTING);
+       __state_set(&ep->com, ABORTING);
        set_bit(ABORT_CONN, &ep->com.history);
        return send_abort(ep, skb, gfp);
 }
@@ -1154,7 +1158,7 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
        return credits;
 }
 
-static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
+static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
        struct mpa_message *mpa;
        struct mpa_v2_conn_params *mpa_v2_params;
@@ -1164,6 +1168,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
        struct c4iw_qp_attributes attrs;
        enum c4iw_qp_attr_mask mask;
        int err;
+       int disconnect = 0;
 
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 
@@ -1173,7 +1178,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
         * will abort the connection.
         */
        if (stop_ep_timer(ep))
-               return;
+               return 0;
 
        /*
         * If we get more than the supported amount of private data
@@ -1195,7 +1200,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
         * if we don't even have the mpa message, then bail.
         */
        if (ep->mpa_pkt_len < sizeof(*mpa))
-               return;
+               return 0;
        mpa = (struct mpa_message *) ep->mpa_pkt;
 
        /* Validate MPA header. */
@@ -1235,7 +1240,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
         * We'll continue process when more data arrives.
         */
        if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
-               return;
+               return 0;
 
        if (mpa->flags & MPA_REJECT) {
                err = -ECONNREFUSED;
@@ -1337,9 +1342,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
                attrs.layer_etype = LAYER_MPA | DDP_LLP;
                attrs.ecode = MPA_NOMATCH_RTR;
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
+               attrs.send_term = 1;
                err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
                err = -ENOMEM;
+               disconnect = 1;
                goto out;
        }
 
@@ -1355,9 +1362,11 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
                attrs.layer_etype = LAYER_MPA | DDP_LLP;
                attrs.ecode = MPA_INSUFF_IRD;
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
+               attrs.send_term = 1;
                err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+                               C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
                err = -ENOMEM;
+               disconnect = 1;
                goto out;
        }
        goto out;
@@ -1366,7 +1375,7 @@ err:
        send_abort(ep, skb, GFP_KERNEL);
 out:
        connect_reply_upcall(ep, err);
-       return;
+       return disconnect;
 }
 
 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
@@ -1524,6 +1533,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
        unsigned int tid = GET_TID(hdr);
        struct tid_info *t = dev->rdev.lldi.tids;
        __u8 status = hdr->status;
+       int disconnect = 0;
 
        ep = lookup_tid(t, tid);
        if (!ep)
@@ -1539,7 +1549,7 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
        switch (ep->com.state) {
        case MPA_REQ_SENT:
                ep->rcv_seq += dlen;
-               process_mpa_reply(ep, skb);
+               disconnect = process_mpa_reply(ep, skb);
                break;
        case MPA_REQ_WAIT:
                ep->rcv_seq += dlen;
@@ -1555,13 +1565,16 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
                               ep->com.state, ep->hwtid, status);
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
                c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
-                              C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
+                              C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+               disconnect = 1;
                break;
        }
        default:
                break;
        }
        mutex_unlock(&ep->com.mutex);
+       if (disconnect)
+               c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
        return 0;
 }
 
@@ -2009,6 +2022,10 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
                if (tcph->ece && tcph->cwr)
                        opt2 |= CCTRL_ECN(1);
        }
+       if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+               opt2 |= T5_OPT_2_VALID;
+               opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+       }
 
        rpl = cplhdr(skb);
        INIT_TP_WR(rpl, ep->hwtid);
@@ -3482,9 +3499,9 @@ static void process_timeout(struct c4iw_ep *ep)
                        __func__, ep, ep->hwtid, ep->com.state);
                abort = 0;
        }
-       mutex_unlock(&ep->com.mutex);
        if (abort)
                abort_connection(ep, NULL, GFP_KERNEL);
+       mutex_unlock(&ep->com.mutex);
        c4iw_put_ep(&ep->com);
 }
 
index 7b8c5806a09d84d912d274d4a5da814109e921b8..7474b490760a413f9f13d9e04ead79319a6fd55e 100644 (file)
@@ -435,6 +435,7 @@ struct c4iw_qp_attributes {
        u8 ecode;
        u16 sq_db_inc;
        u16 rq_db_inc;
+       u8 send_term;
 };
 
 struct c4iw_qp {
index 7b5114cb486f64f118beb7f2415ad415d75f40ae..086f62f5dc9e2ba5978e81f02e8e392c8e201774 100644 (file)
@@ -1388,11 +1388,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                        qhp->attr.layer_etype = attrs->layer_etype;
                        qhp->attr.ecode = attrs->ecode;
                        ep = qhp->ep;
-                       disconnect = 1;
-                       c4iw_get_ep(&qhp->ep->com);
-                       if (!internal)
+                       if (!internal) {
+                               c4iw_get_ep(&qhp->ep->com);
                                terminate = 1;
-                       else {
+                               disconnect = 1;
+                       } else {
+                               terminate = qhp->attr.send_term;
                                ret = rdma_fini(rhp, qhp, ep);
                                if (ret)
                                        goto err;
@@ -1776,11 +1777,15 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        /*
         * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
         * ringing the queue db when we're in DB_FULL mode.
+        * Only allow this on T4 devices.
         */
        attrs.sq_db_inc = attr->sq_psn;
        attrs.rq_db_inc = attr->rq_psn;
        mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
        mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
+       if (is_t5(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
+           (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
+               return -EINVAL;
 
        return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
 }
index dc193c292671ca49e889bc6bedb10d7c1a5fe8ce..6121ca08fe588bff67aab81fe7df06119287292b 100644 (file)
@@ -836,4 +836,18 @@ struct ulptx_idata {
 #define V_RX_DACK_CHANGE(x) ((x) << S_RX_DACK_CHANGE)
 #define F_RX_DACK_CHANGE    V_RX_DACK_CHANGE(1U)
 
+enum {                     /* TCP congestion control algorithms */
+       CONG_ALG_RENO,
+       CONG_ALG_TAHOE,
+       CONG_ALG_NEWRENO,
+       CONG_ALG_HIGHSPEED
+};
+
+#define S_CONG_CNTRL    14
+#define M_CONG_CNTRL    0x3
+#define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
+#define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
+
+#define T5_OPT_2_VALID       (1 << 31)
+
 #endif /* _T4FW_RI_API_H_ */
index 41be897df8d5521250d79dee5362c08fe0f80067..3899ba7821c5e78d4496c29ad3fba2b8b4ffcad9 100644 (file)
@@ -41,6 +41,7 @@
 #define ARMADA_370_XP_INT_SET_ENABLE_OFFS      (0x30)
 #define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS    (0x34)
 #define ARMADA_370_XP_INT_SOURCE_CTL(irq)      (0x100 + irq*4)
+#define ARMADA_370_XP_INT_SOURCE_CPU_MASK      0xF
 
 #define ARMADA_370_XP_CPU_INTACK_OFFS          (0x44)
 #define ARMADA_375_PPI_CAUSE                   (0x10)
@@ -132,8 +133,7 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip,
                                       struct msi_desc *desc)
 {
        struct msi_msg msg;
-       irq_hw_number_t hwirq;
-       int virq;
+       int virq, hwirq;
 
        hwirq = armada_370_xp_alloc_msi();
        if (hwirq < 0)
@@ -159,8 +159,19 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip,
                                           unsigned int irq)
 {
        struct irq_data *d = irq_get_irq_data(irq);
+       unsigned long hwirq = d->hwirq;
+
        irq_dispose_mapping(irq);
-       armada_370_xp_free_msi(d->hwirq);
+       armada_370_xp_free_msi(hwirq);
+}
+
+static int armada_370_xp_check_msi_device(struct msi_chip *chip, struct pci_dev *dev,
+                                         int nvec, int type)
+{
+       /* We support MSI, but not MSI-X */
+       if (type == PCI_CAP_ID_MSI)
+               return 0;
+       return -EINVAL;
 }
 
 static struct irq_chip armada_370_xp_msi_irq_chip = {
@@ -201,6 +212,7 @@ static int armada_370_xp_msi_init(struct device_node *node,
 
        msi_chip->setup_irq = armada_370_xp_setup_msi_irq;
        msi_chip->teardown_irq = armada_370_xp_teardown_msi_irq;
+       msi_chip->check_device = armada_370_xp_check_msi_device;
        msi_chip->of_node = node;
 
        armada_370_xp_msi_domain =
@@ -244,35 +256,18 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 static int armada_xp_set_affinity(struct irq_data *d,
                                  const struct cpumask *mask_val, bool force)
 {
-       unsigned long reg;
-       unsigned long new_mask = 0;
-       unsigned long online_mask = 0;
-       unsigned long count = 0;
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
+       unsigned long reg, mask;
        int cpu;
 
-       for_each_cpu(cpu, mask_val) {
-               new_mask |= 1 << cpu_logical_map(cpu);
-               count++;
-       }
-
-       /*
-        * Forbid mutlicore interrupt affinity
-        * This is required since the MPIC HW doesn't limit
-        * several CPUs from acknowledging the same interrupt.
-        */
-       if (count > 1)
-               return -EINVAL;
-
-       for_each_cpu(cpu, cpu_online_mask)
-               online_mask |= 1 << cpu_logical_map(cpu);
+       /* Select a single core from the affinity mask which is online */
+       cpu = cpumask_any_and(mask_val, cpu_online_mask);
+       mask = 1UL << cpu_logical_map(cpu);
 
        raw_spin_lock(&irq_controller_lock);
-
        reg = readl(main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
-       reg = (reg & (~online_mask)) | new_mask;
+       reg = (reg & (~ARMADA_370_XP_INT_SOURCE_CPU_MASK)) | mask;
        writel(reg, main_int_base + ARMADA_370_XP_INT_SOURCE_CTL(hwirq));
-
        raw_spin_unlock(&irq_controller_lock);
 
        return 0;
@@ -494,15 +489,6 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
 
 #ifdef CONFIG_SMP
        armada_xp_mpic_smp_cpu_init();
-
-       /*
-        * Set the default affinity from all CPUs to the boot cpu.
-        * This is required since the MPIC doesn't limit several CPUs
-        * from acknowledging the same interrupt.
-        */
-       cpumask_clear(irq_default_affinity);
-       cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
-
 #endif
 
        armada_370_xp_msi_init(node, main_int_res.start);
index fc817d28d1fe50341bc4c9fe0d4de188f599d947..3d15d16a7088d2d886ef769f96534d896ded1b73 100644 (file)
@@ -107,7 +107,7 @@ static int __init crossbar_of_init(struct device_node *node)
        int i, size, max, reserved = 0, entry;
        const __be32 *irqsr;
 
-       cb = kzalloc(sizeof(struct cb_device *), GFP_KERNEL);
+       cb = kzalloc(sizeof(*cb), GFP_KERNEL);
 
        if (!cb)
                return -ENOMEM;
index 1bf4a71919ec73957a00550dec49b3a3b3a1292c..9380be7b18954b9308ed42abe5fafa2f87c0f76a 100644 (file)
@@ -2488,6 +2488,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
 
                } else {
                        inc_hit_counter(cache, bio);
+                       pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
 
                        if (bio_data_dir(bio) == WRITE && writethrough_mode(&cache->features) &&
                            !is_dirty(cache, lookup_result.cblock))
index 53728be84dee35ac8dfabbf48087919841049f1a..13abade76ad9bbd65c83c67d35f075506b17b63f 100644 (file)
@@ -232,6 +232,13 @@ struct thin_c {
        struct bio_list deferred_bio_list;
        struct bio_list retry_on_resume_list;
        struct rb_root sort_bio_list; /* sorted list of deferred bios */
+
+       /*
+        * Ensures the thin is not destroyed until the worker has finished
+        * iterating the active_thins list.
+        */
+       atomic_t refcount;
+       struct completion can_destroy;
 };
 
 /*----------------------------------------------------------------*/
@@ -1486,6 +1493,45 @@ static void process_thin_deferred_bios(struct thin_c *tc)
        blk_finish_plug(&plug);
 }
 
+static void thin_get(struct thin_c *tc);
+static void thin_put(struct thin_c *tc);
+
+/*
+ * We can't hold rcu_read_lock() around code that can block.  So we
+ * find a thin with the rcu lock held; bump a refcount; then drop
+ * the lock.
+ */
+static struct thin_c *get_first_thin(struct pool *pool)
+{
+       struct thin_c *tc = NULL;
+
+       rcu_read_lock();
+       if (!list_empty(&pool->active_thins)) {
+               tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list);
+               thin_get(tc);
+       }
+       rcu_read_unlock();
+
+       return tc;
+}
+
+static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc)
+{
+       struct thin_c *old_tc = tc;
+
+       rcu_read_lock();
+       list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) {
+               thin_get(tc);
+               thin_put(old_tc);
+               rcu_read_unlock();
+               return tc;
+       }
+       thin_put(old_tc);
+       rcu_read_unlock();
+
+       return NULL;
+}
+
 static void process_deferred_bios(struct pool *pool)
 {
        unsigned long flags;
@@ -1493,10 +1539,11 @@ static void process_deferred_bios(struct pool *pool)
        struct bio_list bios;
        struct thin_c *tc;
 
-       rcu_read_lock();
-       list_for_each_entry_rcu(tc, &pool->active_thins, list)
+       tc = get_first_thin(pool);
+       while (tc) {
                process_thin_deferred_bios(tc);
-       rcu_read_unlock();
+               tc = get_next_thin(pool, tc);
+       }
 
        /*
         * If there are any deferred flush bios, we must commit
@@ -1578,7 +1625,7 @@ static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
 {
        struct noflush_work w;
 
-       INIT_WORK(&w.worker, fn);
+       INIT_WORK_ONSTACK(&w.worker, fn);
        w.tc = tc;
        atomic_set(&w.complete, 0);
        init_waitqueue_head(&w.wait);
@@ -3061,11 +3108,25 @@ static struct target_type pool_target = {
 /*----------------------------------------------------------------
  * Thin target methods
  *--------------------------------------------------------------*/
+static void thin_get(struct thin_c *tc)
+{
+       atomic_inc(&tc->refcount);
+}
+
+static void thin_put(struct thin_c *tc)
+{
+       if (atomic_dec_and_test(&tc->refcount))
+               complete(&tc->can_destroy);
+}
+
 static void thin_dtr(struct dm_target *ti)
 {
        struct thin_c *tc = ti->private;
        unsigned long flags;
 
+       thin_put(tc);
+       wait_for_completion(&tc->can_destroy);
+
        spin_lock_irqsave(&tc->pool->lock, flags);
        list_del_rcu(&tc->list);
        spin_unlock_irqrestore(&tc->pool->lock, flags);
@@ -3101,6 +3162,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
        struct thin_c *tc;
        struct dm_dev *pool_dev, *origin_dev;
        struct mapped_device *pool_md;
+       unsigned long flags;
 
        mutex_lock(&dm_thin_pool_table.mutex);
 
@@ -3191,9 +3253,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
 
        mutex_unlock(&dm_thin_pool_table.mutex);
 
-       spin_lock(&tc->pool->lock);
+       atomic_set(&tc->refcount, 1);
+       init_completion(&tc->can_destroy);
+
+       spin_lock_irqsave(&tc->pool->lock, flags);
        list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
-       spin_unlock(&tc->pool->lock);
+       spin_unlock_irqrestore(&tc->pool->lock, flags);
        /*
         * This synchronize_rcu() call is needed here otherwise we risk a
         * wake_worker() call finding no bios to process (because the newly
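
The thin_get()/thin_put() additions above combine a reference count with a completion so that thin_dtr() can wait until the worker, which walks active_thins under RCU, has dropped its reference. A condensed sketch of that general pattern, with hypothetical names rather than the dm-thin types:

	#include <linux/atomic.h>
	#include <linux/completion.h>

	struct obj {
		atomic_t refcount;
		struct completion can_destroy;
	};

	static void obj_init(struct obj *o)
	{
		atomic_set(&o->refcount, 1);		/* owner's reference */
		init_completion(&o->can_destroy);
	}

	static void obj_get(struct obj *o)
	{
		atomic_inc(&o->refcount);
	}

	static void obj_put(struct obj *o)
	{
		/* Last reference gone: let the destructor proceed. */
		if (atomic_dec_and_test(&o->refcount))
			complete(&o->can_destroy);
	}

	static void obj_destroy(struct obj *o)
	{
		obj_put(o);				/* drop the owner's reference */
		wait_for_completion(&o->can_destroy);	/* wait for transient users */
		/* now safe to tear down and free o */
	}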
index 796007a5e0e1a4b6e83b0871c1fca1ef8c0c461f..7a7bab8947ae3485d31c132cb3398251c7d507cf 100644 (file)
@@ -330,15 +330,17 @@ test_block_hash:
                                return r;
                        }
                }
-
                todo = 1 << v->data_dev_block_bits;
-               while (io->iter.bi_size) {
+               do {
                        u8 *page;
+                       unsigned len;
                        struct bio_vec bv = bio_iter_iovec(bio, io->iter);
 
                        page = kmap_atomic(bv.bv_page);
-                       r = crypto_shash_update(desc, page + bv.bv_offset,
-                                               bv.bv_len);
+                       len = bv.bv_len;
+                       if (likely(len >= todo))
+                               len = todo;
+                       r = crypto_shash_update(desc, page + bv.bv_offset, len);
                        kunmap_atomic(page);
 
                        if (r < 0) {
@@ -346,8 +348,9 @@ test_block_hash:
                                return r;
                        }
 
-                       bio_advance_iter(bio, &io->iter, bv.bv_len);
-               }
+                       bio_advance_iter(bio, &io->iter, len);
+                       todo -= len;
+               } while (todo);
 
                if (!v->version) {
                        r = crypto_shash_update(desc, v->salt, v->salt_size);
index 7ff473c871a9a249bd02007c2176ec5c471d7626..8d659e6a1b4c0899e32706b8bfa7fe3270ff715a 100644 (file)
@@ -431,7 +431,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
         * Create one workqueue per volume (per registered block device).
         * Remember workqueues are cheap, they're not threads.
         */
-       dev->wq = alloc_workqueue(gd->disk_name, 0, 0);
+       dev->wq = alloc_workqueue("%s", 0, 0, gd->disk_name);
        if (!dev->wq)
                goto out_free_queue;
        INIT_WORK(&dev->work, ubiblock_do_work);
index 02317c1c02385914c94175fa8757089c677e2b94..0f3425dac91046300f93587d4f341e080c98e322 100644 (file)
@@ -671,6 +671,8 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 
        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
        self_check_in_wl_tree(ubi, e, &ubi->free);
+       ubi->free_count--;
+       ubi_assert(ubi->free_count >= 0);
        rb_erase(&e->u.rb, &ubi->free);
 
        return e;
@@ -684,6 +686,9 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
        peb = __wl_get_peb(ubi);
        spin_unlock(&ubi->wl_lock);
 
+       if (peb < 0)
+               return peb;
+
        err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
                                    ubi->peb_size - ubi->vid_hdr_aloffset);
        if (err) {
@@ -1068,6 +1073,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 
                        /* Give the unused PEB back */
                        wl_tree_add(e2, &ubi->free);
+                       ubi->free_count++;
                        goto out_cancel;
                }
                self_check_in_wl_tree(ubi, e1, &ubi->used);
index 9bcf2cf19357837dcb6dd8e5cce637150a7a32d0..5aeb89411350a4c98a2d769302e16a8d650d075f 100644 (file)
@@ -364,7 +364,7 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 
                memset(r, 0, sizeof(*r));
                /*
-                * Get optional "interrupts-names" property to add a name
+                * Get optional "interrupt-names" property to add a name
                 * to the resource.
                 */
                of_property_read_string_index(dev, "interrupt-names", index,
@@ -379,6 +379,32 @@ int of_irq_to_resource(struct device_node *dev, int index, struct resource *r)
 }
 EXPORT_SYMBOL_GPL(of_irq_to_resource);
 
+/**
+ * of_irq_get - Decode a node's IRQ and return it as a Linux irq number
+ * @dev: pointer to device tree node
+ * @index: zero-based index of the irq
+ *
+ * Returns Linux irq number on success, or -EPROBE_DEFER if the irq domain
+ * is not yet created.
+ *
+ */
+int of_irq_get(struct device_node *dev, int index)
+{
+       int rc;
+       struct of_phandle_args oirq;
+       struct irq_domain *domain;
+
+       rc = of_irq_parse_one(dev, index, &oirq);
+       if (rc)
+               return rc;
+
+       domain = irq_find_host(oirq.np);
+       if (!domain)
+               return -EPROBE_DEFER;
+
+       return irq_create_of_mapping(&oirq);
+}
+
 /**
  * of_irq_count - Count the number of IRQs a node uses
  * @dev: pointer to device tree node
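
of_irq_get(), added above, deliberately separates "the irq domain is not registered yet" (-EPROBE_DEFER) from a hard parsing failure. A hedged sketch of how a platform driver probe might consume it; foo_probe() and foo_handler() are hypothetical and not part of this series:

	static int foo_probe(struct platform_device *pdev)
	{
		int irq = of_irq_get(pdev->dev.of_node, 0);

		if (irq == -EPROBE_DEFER)
			return irq;	/* domain not there yet; driver core retries */
		if (irq <= 0)
			return irq ? irq : -EINVAL;	/* parse or mapping failure */

		return devm_request_irq(&pdev->dev, irq, foo_handler, 0,
					dev_name(&pdev->dev), pdev);
	}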
index 404d1daebefa7d7a5d02cbfb611e9585077c00b0..bd47fbc53dc96258fba942d073ee320f25e5627f 100644 (file)
@@ -168,7 +168,9 @@ struct platform_device *of_device_alloc(struct device_node *np,
                        rc = of_address_to_resource(np, i, res);
                        WARN_ON(rc);
                }
-               WARN_ON(of_irq_to_resource_table(np, res, num_irq) != num_irq);
+               if (of_irq_to_resource_table(np, res, num_irq) != num_irq)
+                       pr_debug("not all legacy IRQ resources mapped for %s\n",
+                                np->name);
        }
 
        dev->dev.of_node = of_node_get(np);
index ae4450070503f1067579f02576ce5ec14b2a7192..fe70b86bcffb9d086edd51c758a17a60ca45cf7b 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_irq.h>
+#include <linux/of_platform.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
@@ -427,6 +428,36 @@ static void __init of_selftest_match_node(void)
        }
 }
 
+static void __init of_selftest_platform_populate(void)
+{
+       int irq;
+       struct device_node *np;
+       struct platform_device *pdev;
+
+       np = of_find_node_by_path("/testcase-data");
+       of_platform_populate(np, of_default_bus_match_table, NULL, NULL);
+
+       /* Test that a missing irq domain returns -EPROBE_DEFER */
+       np = of_find_node_by_path("/testcase-data/testcase-device1");
+       pdev = of_find_device_by_node(np);
+       if (!pdev)
+               selftest(0, "device 1 creation failed\n");
+       irq = platform_get_irq(pdev, 0);
+       if (irq != -EPROBE_DEFER)
+               selftest(0, "device deferred probe failed - %d\n", irq);
+
+       /* Test that a parsing failure does not return -EPROBE_DEFER */
+       np = of_find_node_by_path("/testcase-data/testcase-device2");
+       pdev = of_find_device_by_node(np);
+       if (!pdev)
+               selftest(0, "device 2 creation failed\n");
+       irq = platform_get_irq(pdev, 0);
+       if (irq >= 0 || irq == -EPROBE_DEFER)
+               selftest(0, "device parsing error failed - %d\n", irq);
+
+       selftest(1, "passed");
+}
+
 static int __init of_selftest(void)
 {
        struct device_node *np;
@@ -445,6 +476,7 @@ static int __init of_selftest(void)
        of_selftest_parse_interrupts();
        of_selftest_parse_interrupts_extended();
        of_selftest_match_node();
+       of_selftest_platform_populate();
        pr_info("end of selftest - %i passed, %i failed\n",
                selftest_results.passed, selftest_results.failed);
        return 0;
index c843720bd3e53d7e7f632f4c75152edbefbc7d5f..da4695f60351ad4c7c24aecf791a5a6c2b71edfc 100644 (file)
                                                      <&test_intmap1 1 2>;
                        };
                };
+
+               testcase-device1 {
+                       compatible = "testcase-device";
+                       interrupt-parent = <&test_intc0>;
+                       interrupts = <1>;
+               };
+
+               testcase-device2 {
+                       compatible = "testcase-device";
+                       interrupt-parent = <&test_intc2>;
+                       interrupts = <1>; /* invalid specifier - too short */
+               };
        };
+
 };
index 92ed4b2e3c0716cf3f21580b49434fc6e9952fc6..c862f9c0e9ce4cb356222b496c40ca1ac26b0699 100644 (file)
@@ -64,7 +64,6 @@ struct as3722_pin_function {
 };
 
 struct as3722_gpio_pin_control {
-       bool enable_gpio_invert;
        unsigned mode_prop;
        int io_function;
 };
@@ -320,10 +319,8 @@ static int as3722_pinctrl_gpio_set_direction(struct pinctrl_dev *pctldev,
                return mode;
        }
 
-       if (as_pci->gpio_control[offset].enable_gpio_invert)
-               mode |= AS3722_GPIO_INV;
-
-       return as3722_write(as3722, AS3722_GPIOn_CONTROL_REG(offset), mode);
+       return as3722_update_bits(as3722, AS3722_GPIOn_CONTROL_REG(offset),
+                               AS3722_GPIO_MODE_MASK, mode);
 }
 
 static const struct pinmux_ops as3722_pinmux_ops = {
@@ -496,10 +493,18 @@ static void as3722_gpio_set(struct gpio_chip *chip, unsigned offset,
 {
        struct as3722_pctrl_info *as_pci = to_as_pci(chip);
        struct as3722 *as3722 = as_pci->as3722;
-       int en_invert = as_pci->gpio_control[offset].enable_gpio_invert;
+       int en_invert;
        u32 val;
        int ret;
 
+       ret = as3722_read(as3722, AS3722_GPIOn_CONTROL_REG(offset), &val);
+       if (ret < 0) {
+               dev_err(as_pci->dev,
+                       "GPIO_CONTROL%d_REG read failed: %d\n", offset, ret);
+               return;
+       }
+       en_invert = !!(val & AS3722_GPIO_INV);
+
        if (value)
                val = (en_invert) ? 0 : AS3722_GPIOn_SIGNAL(offset);
        else
index 81075f2a1d3f87d9ac9d2cf4d62edf94e21bf75f..2960557bfed95c6d79f316c020ec98c426f38f83 100644 (file)
@@ -810,6 +810,7 @@ static const struct pinconf_ops pcs_pinconf_ops = {
 static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
                unsigned pin_pos)
 {
+       struct pcs_soc_data *pcs_soc = &pcs->socdata;
        struct pinctrl_pin_desc *pin;
        struct pcs_name *pn;
        int i;
@@ -821,6 +822,18 @@ static int pcs_add_pin(struct pcs_device *pcs, unsigned offset,
                return -ENOMEM;
        }
 
+       if (pcs_soc->irq_enable_mask) {
+               unsigned val;
+
+               val = pcs->read(pcs->base + offset);
+               if (val & pcs_soc->irq_enable_mask) {
+                       dev_dbg(pcs->dev, "irq enabled at boot for pin at %lx (%x), clearing\n",
+                               (unsigned long)pcs->res->start + offset, val);
+                       val &= ~pcs_soc->irq_enable_mask;
+                       pcs->write(val, pcs->base + offset);
+               }
+       }
+
        pin = &pcs->pins.pa[i];
        pn = &pcs->names[i];
        sprintf(pn->name, "%lx.%d",
index c5e0f6973a3b06c3e197eccbe7ba917a27d1c3ec..26ca6855f478d3018f79ab8aab87550e1eb52610 100644 (file)
@@ -629,9 +629,8 @@ static int tb10x_gpio_request_enable(struct pinctrl_dev *pctl,
         */
        for (i = 0; i < state->pinfuncgrpcnt; i++) {
                const struct tb10x_pinfuncgrp *pfg = &state->pingroups[i];
-               unsigned int port = pfg->port;
                unsigned int mode = pfg->mode;
-               int j;
+               int j, port = pfg->port;
 
                /*
                 * Skip pin groups which are always mapped and don't need
index 48093719167abd91e27f93eb869eab3f1edd5c51..f5cd3f9618083bacca6414489db5e83b3dc128d9 100644 (file)
@@ -4794,8 +4794,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
                FN_MSIOF0_SCK_B, 0,
                /* IP5_23_21 [3] */
                FN_WE1_N, FN_IERX, FN_CAN1_RX, FN_VI1_G4,
-               FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B,
-               FN_IERX_C, 0,
+               FN_VI1_G4_B, FN_VI2_R6, FN_SCIFA0_CTS_N_B, FN_IERX_C,
                /* IP5_20_18 [3] */
                FN_WE0_N, FN_IECLK, FN_CAN_CLK,
                FN_VI2_VSYNC_N, FN_SCIFA0_TXD_B, FN_VI2_VSYNC_N_B, 0, 0,
index 5186d70c49d43326bc0a3e1f0405332d512cb989..7868bf3a0f911dccfbe7b516c469ac63f6422e4c 100644 (file)
@@ -5288,7 +5288,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
                /* SEL_SCIF3 [2] */
                FN_SEL_SCIF3_0, FN_SEL_SCIF3_1, FN_SEL_SCIF3_2, FN_SEL_SCIF3_3,
                /* SEL_IEB [2] */
-               FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2,
+               FN_SEL_IEB_0, FN_SEL_IEB_1, FN_SEL_IEB_2, 0,
                /* SEL_MMC [1] */
                FN_SEL_MMC_0, FN_SEL_MMC_1,
                /* SEL_SCIF5 [1] */
index 9f611cbbc294ea8c5ae84023e132e02152e36f85..c31aa07b3ba55541ff434adf45aed76adb0adee3 100644 (file)
@@ -83,8 +83,7 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
 {
        struct acpi_device *acpi_dev;
        acpi_handle handle;
-       struct acpi_buffer buffer;
-       int ret;
+       int ret = 0;
 
        pnp_dbg(&dev->dev, "set resources\n");
 
@@ -97,19 +96,26 @@ static int pnpacpi_set_resources(struct pnp_dev *dev)
        if (WARN_ON_ONCE(acpi_dev != dev->data))
                dev->data = acpi_dev;
 
-       ret = pnpacpi_build_resource_template(dev, &buffer);
-       if (ret)
-               return ret;
-       ret = pnpacpi_encode_resources(dev, &buffer);
-       if (ret) {
+       if (acpi_has_method(handle, METHOD_NAME__SRS)) {
+               struct acpi_buffer buffer;
+
+               ret = pnpacpi_build_resource_template(dev, &buffer);
+               if (ret)
+                       return ret;
+
+               ret = pnpacpi_encode_resources(dev, &buffer);
+               if (!ret) {
+                       acpi_status status;
+
+                       status = acpi_set_current_resources(handle, &buffer);
+                       if (ACPI_FAILURE(status))
+                               ret = -EIO;
+               }
                kfree(buffer.pointer);
-               return ret;
        }
-       if (ACPI_FAILURE(acpi_set_current_resources(handle, &buffer)))
-               ret = -EINVAL;
-       else if (acpi_bus_power_manageable(handle))
+       if (!ret && acpi_bus_power_manageable(handle))
                ret = acpi_bus_set_power(handle, ACPI_STATE_D0);
-       kfree(buffer.pointer);
+
        return ret;
 }
 
@@ -117,7 +123,7 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
 {
        struct acpi_device *acpi_dev;
        acpi_handle handle;
-       int ret;
+       acpi_status status;
 
        dev_dbg(&dev->dev, "disable resources\n");
 
@@ -128,13 +134,15 @@ static int pnpacpi_disable_resources(struct pnp_dev *dev)
        }
 
        /* acpi_unregister_gsi(pnp_irq(dev, 0)); */
-       ret = 0;
        if (acpi_bus_power_manageable(handle))
                acpi_bus_set_power(handle, ACPI_STATE_D3_COLD);
-               /* continue even if acpi_bus_set_power() fails */
-       if (ACPI_FAILURE(acpi_evaluate_object(handle, "_DIS", NULL, NULL)))
-               ret = -ENODEV;
-       return ret;
+
+       /* continue even if acpi_bus_set_power() fails */
+       status = acpi_evaluate_object(handle, "_DIS", NULL, NULL);
+       if (ACPI_FAILURE(status) && status != AE_NOT_FOUND)
+               return -ENODEV;
+
+       return 0;
 }
 
 #ifdef CONFIG_ACPI_SLEEP
index 3736bc408adba363f05e54da1762a69a00d3252b..ebf0d6710b5a0d4150627313a3edca99e50a8745 100644 (file)
@@ -335,7 +335,7 @@ static void quirk_amd_mmconfig_area(struct pnp_dev *dev)
 }
 #endif
 
-#ifdef CONFIG_X86
+#ifdef CONFIG_PCI
 /* Device IDs of parts that have 32KB MCH space */
 static const unsigned int mch_quirk_devices[] = {
        0x0154, /* Ivy Bridge */
@@ -440,7 +440,7 @@ static struct pnp_fixup pnp_fixups[] = {
 #ifdef CONFIG_AMD_NB
        {"PNP0c01", quirk_amd_mmconfig_area},
 #endif
-#ifdef CONFIG_X86
+#ifdef CONFIG_PCI
        {"PNP0c02", quirk_intel_mch},
 #endif
        {""}
index 9f0ea6cb6922619dfe04803c284002431110e11f..e3bf885f4a6c29fd77f29b51c219a47c6a04ca7e 100644 (file)
@@ -541,18 +541,27 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area)
 
 static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
 {
-       do {
+       static int ntsm_unsupported;
+
+       while (true) {
                memset(sei, 0, sizeof(*sei));
                sei->request.length = 0x0010;
                sei->request.code = 0x000e;
-               sei->ntsm = ntsm;
+               if (!ntsm_unsupported)
+                       sei->ntsm = ntsm;
 
                if (chsc(sei))
                        break;
 
                if (sei->response.code != 0x0001) {
-                       CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
-                                     sei->response.code);
+                       CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x, ntsm=%llx)\n",
+                                     sei->response.code, sei->ntsm);
+
+                       if (sei->response.code == 3 && sei->ntsm) {
+                               /* Fallback for old firmware. */
+                               ntsm_unsupported = 1;
+                               continue;
+                       }
                        break;
                }
 
@@ -568,7 +577,10 @@ static void chsc_process_event_information(struct chsc_sei *sei, u64 ntsm)
                        CIO_CRW_EVENT(2, "chsc: unhandled nt: %d\n", sei->nt);
                        break;
                }
-       } while (sei->u.nt0_area.flags & 0x80);
+
+               if (!(sei->u.nt0_area.flags & 0x80))
+                       break;
+       }
 }
 
 /*
index 7f0af4fcc0019127ab4d60fc0550ed6daa86ffb6..6fd7d40b2c4dea102e15a2e9c76fef3500c09435 100644 (file)
@@ -8293,7 +8293,6 @@ _scsih_suspend(struct pci_dev *pdev, pm_message_t state)
 
        mpt2sas_base_free_resources(ioc);
        pci_save_state(pdev);
-       pci_disable_device(pdev);
        pci_set_power_state(pdev, device_state);
        return 0;
 }
index 16bfd50cd3fe65644c5443698d3aa3e96dfd5925..db3b494e5926a423866e0ad3a18b15b6378d3cca 100644 (file)
@@ -750,8 +750,12 @@ static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
 
                vscsi->affinity_hint_set = true;
        } else {
-               for (i = 0; i < vscsi->num_queues; i++)
+               for (i = 0; i < vscsi->num_queues; i++) {
+                       if (!vscsi->req_vqs[i].vq)
+                               continue;
+
                        virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);
+               }
 
                vscsi->affinity_hint_set = false;
        }
index 12a3de0ee6dacbdea873ec9ea28bdd88d1ea999d..a0ed6c7d2cd2a3e91a5d12e48af705d75afe315a 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -112,6 +112,11 @@ struct kioctx {
 
        struct work_struct      free_work;
 
+       /*
+        * signals when all in-flight requests are done
+        */
+       struct completion *requests_done;
+
        struct {
                /*
                 * This counts the number of available slots in the ringbuffer,
@@ -508,6 +513,10 @@ static void free_ioctx_reqs(struct percpu_ref *ref)
 {
        struct kioctx *ctx = container_of(ref, struct kioctx, reqs);
 
+       /* At this point we know that there are no in-flight requests */
+       if (ctx->requests_done)
+               complete(ctx->requests_done);
+
        INIT_WORK(&ctx->free_work, free_ioctx);
        schedule_work(&ctx->free_work);
 }
@@ -718,7 +727,8 @@ err:
  *     when the processes owning a context have all exited to encourage
  *     the rapid destruction of the kioctx.
  */
-static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
+static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx,
+               struct completion *requests_done)
 {
        if (!atomic_xchg(&ctx->dead, 1)) {
                struct kioctx_table *table;
@@ -747,7 +757,11 @@ static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx)
                if (ctx->mmap_size)
                        vm_munmap(ctx->mmap_base, ctx->mmap_size);
 
+               ctx->requests_done = requests_done;
                percpu_ref_kill(&ctx->users);
+       } else {
+               if (requests_done)
+                       complete(requests_done);
        }
 }
 
@@ -809,7 +823,7 @@ void exit_aio(struct mm_struct *mm)
                 */
                ctx->mmap_size = 0;
 
-               kill_ioctx(mm, ctx);
+               kill_ioctx(mm, ctx, NULL);
        }
 }
 
@@ -1185,7 +1199,7 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
        if (!IS_ERR(ioctx)) {
                ret = put_user(ioctx->user_id, ctxp);
                if (ret)
-                       kill_ioctx(current->mm, ioctx);
+                       kill_ioctx(current->mm, ioctx, NULL);
                percpu_ref_put(&ioctx->users);
        }
 
@@ -1203,8 +1217,22 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx)
 {
        struct kioctx *ioctx = lookup_ioctx(ctx);
        if (likely(NULL != ioctx)) {
-               kill_ioctx(current->mm, ioctx);
+               struct completion requests_done =
+                       COMPLETION_INITIALIZER_ONSTACK(requests_done);
+
+               /* Pass requests_done to kill_ioctx() where it can be set
+                * in a thread-safe way. If we try to set it here then we have
+                * a race condition if two io_destroy() calls happen simultaneously.
+                */
+               kill_ioctx(current->mm, ioctx, &requests_done);
                percpu_ref_put(&ioctx->users);
+
+               /* Wait until all IO for the context is done. Otherwise the kernel
+                * keeps using user-space buffers even if the user thinks the
+                * context is destroyed.
+                */
+               wait_for_completion(&requests_done);
+
                return 0;
        }
        pr_debug("EINVAL: io_destroy: invalid context id\n");
@@ -1299,10 +1327,8 @@ rw_common:
                                                &iovec, compat)
                        : aio_setup_single_vector(req, rw, buf, &nr_segs,
                                                  iovec);
-               if (ret)
-                       return ret;
-
-               ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
+               if (!ret)
+                       ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes);
                if (ret < 0) {
                        if (iovec != &inline_vec)
                                kfree(iovec);
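
The io_destroy() change earlier in this file hinges on a small pattern: hand an on-stack completion to the teardown path and block until it is signalled once all in-flight requests have finished. A stripped-down illustration of that pattern; struct ctx, ctx->all_done and start_teardown() are hypothetical stand-ins, not the aio code:

	#include <linux/completion.h>

	static void teardown_and_wait(struct ctx *ctx)
	{
		DECLARE_COMPLETION_ONSTACK(done);

		ctx->all_done = &done;		/* published under ctx's own locking */
		start_teardown(ctx);		/* last in-flight user calls complete(&done) */

		/* User buffers referenced by in-flight requests stay valid until here. */
		wait_for_completion(&done);
	}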
index 2e5e648eb5c3dc3bd82bea5ce8dead051864cf75..c561b628ebce519d111d159f541b9df88242a5b1 100644 (file)
@@ -3261,7 +3261,7 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
                        rel->seq = cpu_to_le32(cap->seq);
                        rel->issue_seq = cpu_to_le32(cap->issue_seq),
                        rel->mseq = cpu_to_le32(cap->mseq);
-                       rel->caps = cpu_to_le32(cap->issued);
+                       rel->caps = cpu_to_le32(cap->implemented);
                        rel->wanted = cpu_to_le32(cap->mds_wanted);
                        rel->dname_len = 0;
                        rel->dname_seq = 0;
index 766410a12c2cb209a224fcfd97f63a055fd20801..c29d6ae6887489c29902bec33c4118d4d807e9dc 100644 (file)
@@ -141,7 +141,7 @@ static int __dcache_readdir(struct file *file,  struct dir_context *ctx,
 
        /* start at beginning? */
        if (ctx->pos == 2 || last == NULL ||
-           ctx->pos < ceph_dentry(last)->offset) {
+           fpos_cmp(ctx->pos, ceph_dentry(last)->offset) < 0) {
                if (list_empty(&parent->d_subdirs))
                        goto out_unlock;
                p = parent->d_subdirs.prev;
@@ -182,9 +182,16 @@ more:
        spin_unlock(&dentry->d_lock);
        spin_unlock(&parent->d_lock);
 
+       /* make sure a dentry wasn't dropped while we didn't have parent lock */
+       if (!ceph_dir_is_complete(dir)) {
+               dout(" lost dir complete on %p; falling back to mds\n", dir);
+               dput(dentry);
+               err = -EAGAIN;
+               goto out;
+       }
+
        dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
             dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
-       ctx->pos = di->offset;
        if (!dir_emit(ctx, dentry->d_name.name,
                      dentry->d_name.len,
                      ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
@@ -198,19 +205,12 @@ more:
                return 0;
        }
 
+       ctx->pos = di->offset + 1;
+
        if (last)
                dput(last);
        last = dentry;
 
-       ctx->pos++;
-
-       /* make sure a dentry wasn't dropped while we didn't have parent lock */
-       if (!ceph_dir_is_complete(dir)) {
-               dout(" lost dir complete on %p; falling back to mds\n", dir);
-               err = -EAGAIN;
-               goto out;
-       }
-
        spin_lock(&parent->d_lock);
        p = p->prev;    /* advance to next dentry */
        goto more;
@@ -296,6 +296,8 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
                err = __dcache_readdir(file, ctx, shared_gen);
                if (err != -EAGAIN)
                        return err;
+               frag = fpos_frag(ctx->pos);
+               off = fpos_off(ctx->pos);
        } else {
                spin_unlock(&ci->i_ceph_lock);
        }
@@ -446,7 +448,6 @@ more:
        if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
                dout(" marking %p complete\n", inode);
                __ceph_dir_set_complete(ci, fi->dir_release_count);
-               ci->i_max_offset = ctx->pos;
        }
        spin_unlock(&ci->i_ceph_lock);
 
@@ -935,14 +936,16 @@ static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
                 * to do it here.
                 */
 
-               /* d_move screws up d_subdirs order */
-               ceph_dir_clear_complete(new_dir);
-
                d_move(old_dentry, new_dentry);
 
                /* ensure target dentry is invalidated, despite
                   rehashing bug in vfs_rename_dir */
                ceph_invalidate_dentry_lease(new_dentry);
+
+               /* d_move screws up sibling dentries' offsets */
+               ceph_dir_clear_complete(old_dir);
+               ceph_dir_clear_complete(new_dir);
+
        }
        ceph_mdsc_put_request(req);
        return err;
index 0b0728e5be2d7cba589a935159b88f9d26f0b2e9..233c6f96910abc78d2b120e4e30a44ba0009b88a 100644 (file)
@@ -744,7 +744,6 @@ static int fill_inode(struct inode *inode,
            !__ceph_dir_is_complete(ci)) {
                dout(" marking %p complete (empty)\n", inode);
                __ceph_dir_set_complete(ci, atomic_read(&ci->i_release_count));
-               ci->i_max_offset = 2;
        }
 no_change:
        /* only update max_size on auth cap */
@@ -889,41 +888,6 @@ out_unlock:
        return;
 }
 
-/*
- * Set dentry's directory position based on the current dir's max, and
- * order it in d_subdirs, so that dcache_readdir behaves.
- *
- * Always called under directory's i_mutex.
- */
-static void ceph_set_dentry_offset(struct dentry *dn)
-{
-       struct dentry *dir = dn->d_parent;
-       struct inode *inode = dir->d_inode;
-       struct ceph_inode_info *ci;
-       struct ceph_dentry_info *di;
-
-       BUG_ON(!inode);
-
-       ci = ceph_inode(inode);
-       di = ceph_dentry(dn);
-
-       spin_lock(&ci->i_ceph_lock);
-       if (!__ceph_dir_is_complete(ci)) {
-               spin_unlock(&ci->i_ceph_lock);
-               return;
-       }
-       di->offset = ceph_inode(inode)->i_max_offset++;
-       spin_unlock(&ci->i_ceph_lock);
-
-       spin_lock(&dir->d_lock);
-       spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
-       list_move(&dn->d_u.d_child, &dir->d_subdirs);
-       dout("set_dentry_offset %p %lld (%p %p)\n", dn, di->offset,
-            dn->d_u.d_child.prev, dn->d_u.d_child.next);
-       spin_unlock(&dn->d_lock);
-       spin_unlock(&dir->d_lock);
-}
-
 /*
  * splice a dentry to an inode.
  * caller must hold directory i_mutex for this to be safe.
@@ -933,7 +897,7 @@ static void ceph_set_dentry_offset(struct dentry *dn)
  * the caller) if we fail.
  */
 static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
-                                   bool *prehash, bool set_offset)
+                                   bool *prehash)
 {
        struct dentry *realdn;
 
@@ -965,8 +929,6 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in,
        }
        if ((!prehash || *prehash) && d_unhashed(dn))
                d_rehash(dn);
-       if (set_offset)
-               ceph_set_dentry_offset(dn);
 out:
        return dn;
 }
@@ -987,7 +949,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
 {
        struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
        struct inode *in = NULL;
-       struct ceph_mds_reply_inode *ininfo;
        struct ceph_vino vino;
        struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
        int err = 0;
@@ -1161,6 +1122,9 @@ retry_lookup:
 
                /* rename? */
                if (req->r_old_dentry && req->r_op == CEPH_MDS_OP_RENAME) {
+                       struct inode *olddir = req->r_old_dentry_dir;
+                       BUG_ON(!olddir);
+
                        dout(" src %p '%.*s' dst %p '%.*s'\n",
                             req->r_old_dentry,
                             req->r_old_dentry->d_name.len,
@@ -1180,13 +1144,10 @@ retry_lookup:
                           rehashing bug in vfs_rename_dir */
                        ceph_invalidate_dentry_lease(dn);
 
-                       /*
-                        * d_move() puts the renamed dentry at the end of
-                        * d_subdirs.  We need to assign it an appropriate
-                        * directory offset so we can behave when dir is
-                        * complete.
-                        */
-                       ceph_set_dentry_offset(req->r_old_dentry);
+                       /* d_move screws up sibling dentries' offsets */
+                       ceph_dir_clear_complete(dir);
+                       ceph_dir_clear_complete(olddir);
+
                        dout("dn %p gets new offset %lld\n", req->r_old_dentry,
                             ceph_dentry(req->r_old_dentry)->offset);
 
@@ -1213,8 +1174,9 @@ retry_lookup:
 
                /* attach proper inode */
                if (!dn->d_inode) {
+                       ceph_dir_clear_complete(dir);
                        ihold(in);
-                       dn = splice_dentry(dn, in, &have_lease, true);
+                       dn = splice_dentry(dn, in, &have_lease);
                        if (IS_ERR(dn)) {
                                err = PTR_ERR(dn);
                                goto done;
@@ -1235,17 +1197,16 @@ retry_lookup:
                   (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
                    req->r_op == CEPH_MDS_OP_MKSNAP)) {
                struct dentry *dn = req->r_dentry;
+               struct inode *dir = req->r_locked_dir;
 
                /* fill out a snapdir LOOKUPSNAP dentry */
                BUG_ON(!dn);
-               BUG_ON(!req->r_locked_dir);
-               BUG_ON(ceph_snap(req->r_locked_dir) != CEPH_SNAPDIR);
-               ininfo = rinfo->targeti.in;
-               vino.ino = le64_to_cpu(ininfo->ino);
-               vino.snap = le64_to_cpu(ininfo->snapid);
+               BUG_ON(!dir);
+               BUG_ON(ceph_snap(dir) != CEPH_SNAPDIR);
                dout(" linking snapped dir %p to dn %p\n", in, dn);
+               ceph_dir_clear_complete(dir);
                ihold(in);
-               dn = splice_dentry(dn, in, NULL, true);
+               dn = splice_dentry(dn, in, NULL);
                if (IS_ERR(dn)) {
                        err = PTR_ERR(dn);
                        goto done;
@@ -1407,7 +1368,7 @@ retry_lookup:
                }
 
                if (!dn->d_inode) {
-                       dn = splice_dentry(dn, in, NULL, false);
+                       dn = splice_dentry(dn, in, NULL);
                        if (IS_ERR(dn)) {
                                err = PTR_ERR(dn);
                                dn = NULL;
index fdf941b44ff103a2590a3804aa850e468ec980d6..a822a6e58290bbedfb0e363bf3bbb601075891fb 100644 (file)
@@ -109,6 +109,8 @@ static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
                return PTR_ERR(req);
        req->r_inode = inode;
        ihold(inode);
+       req->r_num_caps = 1;
+
        req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL;
 
        req->r_args.setlayout.layout.fl_stripe_unit =
@@ -153,6 +155,7 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
                return PTR_ERR(req);
        req->r_inode = inode;
        ihold(inode);
+       req->r_num_caps = 1;
 
        req->r_args.setlayout.layout.fl_stripe_unit =
                        cpu_to_le32(l.stripe_unit);
index d94ba0df9f4d195cabf677fcdcd41cc01096c7e7..191398852a2e8927b1ac193b53ceea623829caf1 100644 (file)
@@ -45,6 +45,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct file *file,
                return PTR_ERR(req);
        req->r_inode = inode;
        ihold(inode);
+       req->r_num_caps = 1;
 
        /* mds requires start and length rather than start and end */
        if (LLONG_MAX == fl->fl_end)
index 7866cd05a6bbee4afd2478f3737d0ccd8ac28975..ead05cc1f447562271578131ab25769257080915 100644 (file)
@@ -266,7 +266,6 @@ struct ceph_inode_info {
        struct timespec i_rctime;
        u64 i_rbytes, i_rfiles, i_rsubdirs;
        u64 i_files, i_subdirs;
-       u64 i_max_offset;  /* largest readdir offset, set with complete dir */
 
        struct rb_root i_fragtree;
        struct mutex i_fragtree_mutex;
index a1266089eca1fc0054065dfc723e697daf5691e6..a81c7b556896115a4afbdea5452523057ccd195f 100644 (file)
@@ -1556,7 +1556,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
        if (c->space_fixup) {
                err = ubifs_fixup_free_space(c);
                if (err)
-                       return err;
+                       goto out;
        }
 
        err = check_free_space(c);
index 5a64ca4621f3f650e3c6718137a8ea549198d171..f23174fb9ec4340378df59b5cc89b43ecf342bec 100644 (file)
@@ -93,5 +93,8 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
 #define set_fixmap_io(idx, phys) \
        __set_fixmap(idx, phys, FIXMAP_PAGE_IO)
 
+#define set_fixmap_offset_io(idx, phys) \
+       __set_fixmap_offset(idx, phys, FIXMAP_PAGE_IO)
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_GENERIC_FIXMAP_H */
index d96deb443f18403d50e813f7a82a478e92a6d064..94f9ea8abcae35af8ca36560403fbd25facb7c65 100644 (file)
@@ -50,7 +50,7 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
 }
 
 #ifndef zero_bytemask
-#define zero_bytemask(mask) (~0ul << __fls(mask) << 1)
+#define zero_bytemask(mask) (~1ul << __fls(mask))
 #endif
 
 #endif /* _ASM_WORD_AT_A_TIME_H */
index 9212b017bc7236cfc63afe5c268cc741995a1af4..ae9504b4b67d3026cd9c1c1fcd30a8cfc928c984 100644 (file)
@@ -535,6 +535,7 @@ static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_a
 extern int ftrace_arch_read_dyn_info(char *buf, int size);
 
 extern int skip_trace(unsigned long ip);
+extern void ftrace_module_init(struct module *mod);
 
 extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);
@@ -544,6 +545,7 @@ static inline int ftrace_force_update(void) { return 0; }
 static inline void ftrace_disable_daemon(void) { }
 static inline void ftrace_enable_daemon(void) { }
 static inline void ftrace_release_mod(struct module *mod) {}
+static inline void ftrace_module_init(struct module *mod) {}
 static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
 {
        return -EINVAL;
index 8834a7e5b944a04850ebd824b0dc6b94e944fb35..97ac926c78a707fb6bf45293d00e8f4d86515f43 100644 (file)
@@ -210,7 +210,7 @@ extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
 /**
  * irq_set_affinity - Set the irq affinity of a given irq
  * @irq:       Interrupt to set affinity
- * @mask:      cpumask
+ * @cpumask:   cpumask
  *
  * Fails if cpumask does not contain an online CPU
  */
@@ -223,7 +223,7 @@ irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 /**
  * irq_force_affinity - Force the irq affinity of a given irq
  * @irq:       Interrupt to set affinity
- * @mask:      cpumask
+ * @cpumask:   cpumask
  *
  * Same as irq_set_affinity, but without checking the mask against
  * online cpus.
index 10a0b1ac4ea078e3e8e1b5f3117ab47b0dfba936..5c57efb863d08e5937a36e06778efa8047060156 100644 (file)
@@ -603,6 +603,8 @@ static inline u32 irq_get_trigger_type(unsigned int irq)
        return d ? irqd_get_trigger_type(d) : 0;
 }
 
+unsigned int arch_dynirq_lower_bound(unsigned int from);
+
 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                struct module *owner);
 
index 3f23b4472c3150990237a566ba2d46acb1d1aa82..6404253d810d7482a64fa9e959e99c8bad05c912 100644 (file)
@@ -44,11 +44,16 @@ extern void of_irq_init(const struct of_device_id *matches);
 
 #ifdef CONFIG_OF_IRQ
 extern int of_irq_count(struct device_node *dev);
+extern int of_irq_get(struct device_node *dev, int index);
 #else
 static inline int of_irq_count(struct device_node *dev)
 {
        return 0;
 }
+static inline int of_irq_get(struct device_node *dev, int index)
+{
+       return 0;
+}
 #endif
 
 #if defined(CONFIG_OF)
index 11fd51b413de25a6a2415c1724dee458d3314ddc..ed0b2c599a64f7d701117bf54ba6a4dcd56edb31 100644 (file)
@@ -25,7 +25,7 @@ struct module;
        { (1UL << TAINT_OOT_MODULE),            "O" },          \
        { (1UL << TAINT_FORCED_MODULE),         "F" },          \
        { (1UL << TAINT_CRAP),                  "C" },          \
-       { (1UL << TAINT_UNSIGNED_MODULE),       "X" })
+       { (1UL << TAINT_UNSIGNED_MODULE),       "E" })
 
 TRACE_EVENT(module_load,
 
index d55092ceee2975c204bcb90e856f9b6504d577ac..6b715c0af1b117b5b61bd32629a00845f0313557 100644 (file)
@@ -234,6 +234,11 @@ again:
                        goto again;
                }
                timer->base = new_base;
+       } else {
+               if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+                       cpu = this_cpu;
+                       goto again;
+               }
        }
        return new_base;
 }
@@ -569,6 +574,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 
        cpu_base->expires_next.tv64 = expires_next.tv64;
 
+       /*
+        * If a hang was detected in the last timer interrupt then we
+        * leave the hang delay active in the hardware. We want the
+        * system to make progress. That also prevents the following
+        * scenario:
+        * T1 expires 50ms from now
+        * T2 expires 5s from now
+        *
+        * T1 is removed, so this code is called and would reprogram
+        * the hardware to 5s from now. Any hrtimer_start after that
+        * will not reprogram the hardware due to hang_detected being
+        * set. So we'd effectively block all timers until the T2 event
+        * fires.
+        */
+       if (cpu_base->hang_detected)
+               return;
+
        if (cpu_base->expires_next.tv64 != KTIME_MAX)
                tick_program_event(cpu_base->expires_next, 1);
 }
index a7174617616ba6b8f404a1c3f01cf8b7dd90cb4d..bb07f2928f4b9c2ca33803f712c8889ca5823907 100644 (file)
@@ -363,6 +363,13 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
                if (from > irq)
                        return -EINVAL;
                from = irq;
+       } else {
+               /*
+                * For interrupts which are freely allocated the
+                * architecture can force a lower bound to the @from
+                * argument. x86 uses this to exclude the GSI space.
+                */
+               from = arch_dynirq_lower_bound(from);
        }
 
        mutex_lock(&sparse_irq_lock);
index 11869408f79b86abe33e5194d0f5c705b44e9d81..079c4615607d6ed266330a5416529bfcc37e4db0 100644 (file)
@@ -815,9 +815,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
                return -EFAULT;
        name[MODULE_NAME_LEN-1] = '\0';
 
-       if (!(flags & O_NONBLOCK))
-               pr_warn("waiting module removal not supported: please upgrade\n");
-
        if (mutex_lock_interruptible(&module_mutex) != 0)
                return -EINTR;
 
@@ -3271,6 +3268,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
        dynamic_debug_setup(info->debug, info->num_debug);
 
+       /* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
+       ftrace_module_init(mod);
+
        /* Finally it's fully formed, ready to start executing. */
        err = complete_formation(mod, info);
        if (err)
index b50990a5bea0220df9034f0bcc71d92e452edc78..33e4648ae0e7cd908671ef1a8ab60bbb562c097c 100644 (file)
@@ -779,3 +779,8 @@ int __init __weak arch_early_irq_init(void)
 {
        return 0;
 }
+
+unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
+{
+       return from;
+}
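
arch_dynirq_lower_bound() is a __weak hook: the generic stub above leaves "from" untouched, and an architecture may override it to keep dynamically allocated descriptors above a reserved range (x86 uses it to exclude the GSI space, per the comment added in irqdesc.c). A hypothetical override, for illustration only and not the actual x86 code:

	/* Assume this architecture reserves irq numbers 0..RESERVED_IRQS-1. */
	#define RESERVED_IRQS	64

	unsigned int arch_dynirq_lower_bound(unsigned int from)
	{
		return from < RESERVED_IRQS ? RESERVED_IRQS : from;
	}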
index 87bd529879c23bb12705fa0144cff354064f91dc..3bb01a323b2a3e0ae9291271f4dc0322f01bfd80 100644 (file)
@@ -838,7 +838,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
 
        bit = find_last_bit(&mask, BITS_PER_LONG);
 
-       mask = (1 << bit) - 1;
+       mask = (1UL << bit) - 1;
 
        expires_limit = expires_limit & ~(mask);
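
The 1UL fix above matters because a plain 1 is a 32-bit int: find_last_bit() can return any index up to BITS_PER_LONG - 1, and shifting an int by 31 or more is undefined, so the slack mask could be computed wrongly for far-future expiries on 64-bit kernels. A small illustration, with values chosen only as an example:

	unsigned long bit = 40;
	unsigned long mask;

	/* Old form: (1 << bit) - 1 shifts a 32-bit constant by 40 -- undefined. */
	mask = (1UL << bit) - 1;	/* 0x000000ffffffffff, as intended */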
 
index 1fd4b9479210183762293944be777abb5435f8e3..4a54a25afa2fe67165cb6f65cd0b63c42eb724eb 100644 (file)
@@ -4330,16 +4330,11 @@ static void ftrace_init_module(struct module *mod,
        ftrace_process_locs(mod, start, end);
 }
 
-static int ftrace_module_notify_enter(struct notifier_block *self,
-                                     unsigned long val, void *data)
+void ftrace_module_init(struct module *mod)
 {
-       struct module *mod = data;
-
-       if (val == MODULE_STATE_COMING)
-               ftrace_init_module(mod, mod->ftrace_callsites,
-                                  mod->ftrace_callsites +
-                                  mod->num_ftrace_callsites);
-       return 0;
+       ftrace_init_module(mod, mod->ftrace_callsites,
+                          mod->ftrace_callsites +
+                          mod->num_ftrace_callsites);
 }
 
 static int ftrace_module_notify_exit(struct notifier_block *self,
@@ -4353,11 +4348,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
        return 0;
 }
 #else
-static int ftrace_module_notify_enter(struct notifier_block *self,
-                                     unsigned long val, void *data)
-{
-       return 0;
-}
 static int ftrace_module_notify_exit(struct notifier_block *self,
                                     unsigned long val, void *data)
 {
@@ -4365,11 +4355,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_enter_nb = {
-       .notifier_call = ftrace_module_notify_enter,
-       .priority = INT_MAX,    /* Run before anything that can use kprobes */
-};
-
 struct notifier_block ftrace_module_exit_nb = {
        .notifier_call = ftrace_module_notify_exit,
        .priority = INT_MIN,    /* Run after anything that can remove kprobes */
@@ -4403,10 +4388,6 @@ void __init ftrace_init(void)
                                  __start_mcount_loc,
                                  __stop_mcount_loc);
 
-       ret = register_module_notifier(&ftrace_module_enter_nb);
-       if (ret)
-               pr_warning("Failed to register trace ftrace module enter notifier\n");
-
        ret = register_module_notifier(&ftrace_module_exit_nb);
        if (ret)
                pr_warning("Failed to register trace ftrace module exit notifier\n");
index 925f537f07d17db7caae363dd39a20bd2296d2ee..4747b476a0300bc3c08ad82f97d6a7a10fb638c4 100644 (file)
@@ -77,7 +77,7 @@ event_triggers_call(struct ftrace_event_file *file, void *rec)
                        data->ops->func(data);
                        continue;
                }
-               filter = rcu_dereference(data->filter);
+               filter = rcu_dereference_sched(data->filter);
                if (filter && !filter_match_preds(filter, rec))
                        continue;
                if (data->cmd_ops->post_trigger) {
index d4224b397c0e4e4492fa135c3c5ee9b224872c07..1037a3bab50529f84c9d81c383df07dbfbbda081 100644 (file)
@@ -81,10 +81,12 @@ struct vm_area_struct *vmacache_find(struct mm_struct *mm, unsigned long addr)
        for (i = 0; i < VMACACHE_SIZE; i++) {
                struct vm_area_struct *vma = current->vmacache[i];
 
-               if (vma && vma->vm_start <= addr && vma->vm_end > addr) {
-                       BUG_ON(vma->vm_mm != mm);
+               if (!vma)
+                       continue;
+               if (WARN_ON_ONCE(vma->vm_mm != mm))
+                       break;
+               if (vma->vm_start <= addr && vma->vm_end > addr)
                        return vma;
-               }
        }
 
        return NULL;
index e632b5a52f5b89cb2e275b64494905cc7ebfc8e7..8b8a5a24b223ef268c28cf5e5ac5379314bba237 100644 (file)
@@ -1548,8 +1548,10 @@ static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
                return;
 
        for (i = 0; i < len; i++) {
-               if (osds[i] != CRUSH_ITEM_NONE &&
-                   osdmap->osd_primary_affinity[i] !=
+               int osd = osds[i];
+
+               if (osd != CRUSH_ITEM_NONE &&
+                   osdmap->osd_primary_affinity[osd] !=
                                        CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
                        break;
                }
@@ -1563,10 +1565,9 @@ static void apply_primary_affinity(struct ceph_osdmap *osdmap, u32 pps,
         * osd's pgs get rejected as primary.
         */
        for (i = 0; i < len; i++) {
-               int osd;
+               int osd = osds[i];
                u32 aff;
 
-               osd = osds[i];
                if (osd == CRUSH_ITEM_NONE)
                        continue;
 
index cc49062acdeecf85259f646df09abe22f1019a5d..1052d4834a44f502bda4f4f1ebe1202ace1608f5 100644 (file)
 #define EM_ARCOMPACT   93
 #endif
 
+#ifndef EM_XTENSA
+#define EM_XTENSA      94
+#endif
+
 #ifndef EM_AARCH64
 #define EM_AARCH64     183
 #endif
@@ -281,6 +285,7 @@ do_file(char const *const fname)
        case EM_AARCH64:
        case EM_MICROBLAZE:
        case EM_MIPS:
+       case EM_XTENSA:
                break;
        }  /* end switch */
 
index 248b90abb8825a62e9530a0629cbf432898898d3..480bbddbd801bf002e4cc43fb8c7c0f762ec40c8 100644 (file)
@@ -1059,24 +1059,26 @@ static void azx_init_cmd_io(struct azx *chip)
 
        /* reset the corb hw read pointer */
        azx_writew(chip, CORBRP, ICH6_CORBRP_RST);
-       for (timeout = 1000; timeout > 0; timeout--) {
-               if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
-                       break;
-               udelay(1);
-       }
-       if (timeout <= 0)
-               dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
-                       azx_readw(chip, CORBRP));
+       if (!(chip->driver_caps & AZX_DCAPS_CORBRP_SELF_CLEAR)) {
+               for (timeout = 1000; timeout > 0; timeout--) {
+                       if ((azx_readw(chip, CORBRP) & ICH6_CORBRP_RST) == ICH6_CORBRP_RST)
+                               break;
+                       udelay(1);
+               }
+               if (timeout <= 0)
+                       dev_err(chip->card->dev, "CORB reset timeout#1, CORBRP = %d\n",
+                               azx_readw(chip, CORBRP));
 
-       azx_writew(chip, CORBRP, 0);
-       for (timeout = 1000; timeout > 0; timeout--) {
-               if (azx_readw(chip, CORBRP) == 0)
-                       break;
-               udelay(1);
+               azx_writew(chip, CORBRP, 0);
+               for (timeout = 1000; timeout > 0; timeout--) {
+                       if (azx_readw(chip, CORBRP) == 0)
+                               break;
+                       udelay(1);
+               }
+               if (timeout <= 0)
+                       dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
+                               azx_readw(chip, CORBRP));
        }
-       if (timeout <= 0)
-               dev_err(chip->card->dev, "CORB reset timeout#2, CORBRP = %d\n",
-                       azx_readw(chip, CORBRP));
 
        /* enable corb dma */
        azx_writeb(chip, CORBCTL, ICH6_CORBCTL_RUN);
index d6bca62ef387b92b499dcf5954d5c783543055d1..b540ad71eb0d733ab217550a66ac40eb35e22da6 100644 (file)
@@ -249,7 +249,8 @@ enum {
 /* quirks for Nvidia */
 #define AZX_DCAPS_PRESET_NVIDIA \
        (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI |\
-        AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT)
+        AZX_DCAPS_ALIGN_BUFSIZE | AZX_DCAPS_NO_64BIT |\
+        AZX_DCAPS_CORBRP_SELF_CLEAR)
 
 #define AZX_DCAPS_PRESET_CTHDA \
        (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
index ba38b819f9847de7522de9171c1a7794aecc7a8f..4a7cb01fa91226b2cfd3a4a582d02d9899ffa6e0 100644 (file)
@@ -189,6 +189,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
 #define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)  /* Take LPIB as delay */
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
 #define AZX_DCAPS_I915_POWERWELL (1 << 27)     /* HSW i915 powerwell support */
+#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28)  /* CORBRP clears itself after reset */
 
 /* position fix mode */
 enum {
index c643dfc0a82612c5a2672c5211e6e1d46c5102f3..c1952c9103398953ba0e05078f2f1a21763c8e90 100644 (file)
@@ -4621,6 +4621,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0667, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0668, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x0669, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0674, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x067f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
index f500905e9373510d2bcfdb583b0569cec5994708..2acf82f4a08a8bed4db26c6c444a0eabec688412 100644 (file)
@@ -1018,13 +1018,13 @@ static int alc5623_i2c_probe(struct i2c_client *client,
                dev_err(&client->dev, "failed to read vendor ID1: %d\n", ret);
                return ret;
        }
-       vid1 = ((vid1 & 0xff) << 8) | (vid1 >> 8);
 
        ret = regmap_read(alc5623->regmap, ALC5623_VENDOR_ID2, &vid2);
        if (ret < 0) {
                dev_err(&client->dev, "failed to read vendor ID2: %d\n", ret);
                return ret;
        }
+       vid2 >>= 8;
 
        if ((vid1 != 0x10ec) || (vid2 != id->driver_data)) {
                dev_err(&client->dev, "unknown or wrong codec\n");
index 460d35547a683d226521591333ce06fe1c5de634..2213a037c893107bcfa584701d58e0541bd59e13 100644 (file)
@@ -1229,8 +1229,10 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
        }
 
        if (cs42l52->pdata.reset_gpio) {
-               ret = gpio_request_one(cs42l52->pdata.reset_gpio,
-                                      GPIOF_OUT_INIT_HIGH, "CS42L52 /RST");
+               ret = devm_gpio_request_one(&i2c_client->dev,
+                                           cs42l52->pdata.reset_gpio,
+                                           GPIOF_OUT_INIT_HIGH,
+                                           "CS42L52 /RST");
                if (ret < 0) {
                        dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n",
                                cs42l52->pdata.reset_gpio, ret);
index 0ee60a19a26334dcae0484244fcf9d374965fc88..ae3717992d568fb2ba533634a25e306a9e8fd05b 100644 (file)
@@ -1443,8 +1443,10 @@ static int cs42l73_i2c_probe(struct i2c_client *i2c_client,
        i2c_set_clientdata(i2c_client, cs42l73);
 
        if (cs42l73->pdata.reset_gpio) {
-               ret = gpio_request_one(cs42l73->pdata.reset_gpio,
-                                      GPIOF_OUT_INIT_HIGH, "CS42L73 /RST");
+               ret = devm_gpio_request_one(&i2c_client->dev,
+                                           cs42l73->pdata.reset_gpio,
+                                           GPIOF_OUT_INIT_HIGH,
+                                           "CS42L73 /RST");
                if (ret < 0) {
                        dev_err(&i2c_client->dev, "Failed to request /RST %d: %d\n",
                                cs42l73->pdata.reset_gpio, ret);
index b1835103e9b4002ab44429d40bb16da8372f65aa..d7349bc89ad3085430b57eb0b67a14ea8b3886e6 100644 (file)
@@ -1399,7 +1399,6 @@ static int aic3x_probe(struct snd_soc_codec *codec)
        }
 
        aic3x_add_widgets(codec);
-       list_add(&aic3x->list, &reset_list);
 
        return 0;
 
@@ -1569,7 +1568,13 @@ static int aic3x_i2c_probe(struct i2c_client *i2c,
 
        ret = snd_soc_register_codec(&i2c->dev,
                        &soc_codec_dev_aic3x, &aic3x_dai, 1);
-       return ret;
+
+       if (ret != 0)
+               goto err_gpio;
+
+       list_add(&aic3x->list, &reset_list);
+
+       return 0;
 
 err_gpio:
        if (gpio_is_valid(aic3x->gpio_reset) &&
index b1266790d1174a74497e81d1848436300894b5ea..605a10b2112b3808e8d5a95e282e51daf69eec62 100644 (file)
@@ -144,8 +144,8 @@ enum spdif_gainsel {
 
 /* SPDIF Clock register */
 #define STC_SYSCLK_DIV_OFFSET          11
-#define STC_SYSCLK_DIV_MASK            (0x1ff << STC_TXCLK_SRC_OFFSET)
-#define STC_SYSCLK_DIV(x)              ((((x) - 1) << STC_TXCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK)
+#define STC_SYSCLK_DIV_MASK            (0x1ff << STC_SYSCLK_DIV_OFFSET)
+#define STC_SYSCLK_DIV(x)              ((((x) - 1) << STC_SYSCLK_DIV_OFFSET) & STC_SYSCLK_DIV_MASK)
 #define STC_TXCLK_SRC_OFFSET           8
 #define STC_TXCLK_SRC_MASK             (0x7 << STC_TXCLK_SRC_OFFSET)
 #define STC_TXCLK_SRC_SET(x)           ((x << STC_TXCLK_SRC_OFFSET) & STC_TXCLK_SRC_MASK)
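
The fix above restores the usual shift/mask macro trio: the mask is built from the field's own offset and the setter masks the shifted value. Generic shape with illustrative MY_FIELD names (the 9-bit width mirrors the SYSCLK divider above, but the values are only an example):

#define MY_FIELD_OFFSET         11
#define MY_FIELD_MASK           (0x1ff << MY_FIELD_OFFSET)
#define MY_FIELD_SET(x)         ((((x) - 1) << MY_FIELD_OFFSET) & MY_FIELD_MASK)
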
index fe8e81aad6461faf545b179f6d1a912369872e6a..30ca14a6a83595d5acf4e644d7f8d50827a99a98 100644 (file)
@@ -136,7 +136,7 @@ struct sst_module_data {
        enum sst_data_type data_type;   /* type of module data */
 
        u32 size;               /* size in bytes */
-       u32 offset;             /* offset in FW file */
+       int32_t offset;         /* offset in FW file */
        u32 data_offset;        /* offset in ADSP memory space */
        void *data;             /* module data */
 };
index f46bb4ddde6fc7550573e5fdd0235afda429044a..50e4246d4b57a2df8c9ad0c54deabb4e346ca572 100644 (file)
@@ -617,7 +617,7 @@ static void hsw_notification_work(struct work_struct *work)
        case IPC_POSITION_CHANGED:
                trace_ipc_notification("DSP stream position changed for",
                        stream->reply.stream_hw_id);
-               sst_dsp_inbox_read(hsw->dsp, pos, sizeof(pos));
+               sst_dsp_inbox_read(hsw->dsp, pos, sizeof(*pos));
 
                if (stream->notify_position)
                        stream->notify_position(stream, stream->pdata);
@@ -991,7 +991,8 @@ int sst_hsw_stream_get_volume(struct sst_hsw *hsw, struct sst_hsw_stream *stream
                return -EINVAL;
 
        sst_dsp_read(hsw->dsp, volume,
-               stream->reply.volume_register_address[channel], sizeof(volume));
+               stream->reply.volume_register_address[channel],
+               sizeof(*volume));
 
        return 0;
 }
@@ -1609,7 +1610,7 @@ int sst_hsw_dx_set_state(struct sst_hsw *hsw,
        trace_ipc_request("PM enter Dx state", state);
 
        ret = ipc_tx_message_wait(hsw, header, &state_, sizeof(state_),
-               dx, sizeof(dx));
+               dx, sizeof(*dx));
        if (ret < 0) {
                dev_err(hsw->dev, "ipc: error set dx state %d failed\n", state);
                return ret;
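
All three hunks above fix the same pitfall: when the variable is a pointer, sizeof(ptr) yields the size of the pointer, not of the object it refers to. A stand-alone sketch (read_block() is a hypothetical stand-in for the sst_dsp_* readers):

#include <stddef.h>
#include <string.h>

static void read_block(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);
}

static void example(unsigned int *pos, const unsigned int *mailbox)
{
        /* wrong: copies sizeof(unsigned int *) bytes, i.e. the pointer size */
        /* read_block(pos, mailbox, sizeof(pos)); */

        /* right: copies the size of the object pos points to */
        read_block(pos, mailbox, sizeof(*pos));
}
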
index be873c1b0c204f4f902bfc7e2edad8c10d623e15..d32c540555c41b6c3f9b8f5817a5baa09b5ca47b 100644 (file)
@@ -1,10 +1,8 @@
 #
 # Jz4740 Platform Support
 #
-snd-soc-jz4740-objs := jz4740-pcm.o
 snd-soc-jz4740-i2s-objs := jz4740-i2s.o
 
-obj-$(CONFIG_SND_JZ4740_SOC) += snd-soc-jz4740.o
 obj-$(CONFIG_SND_JZ4740_SOC_I2S) += snd-soc-jz4740-i2s.o
 
 # Jz4740 Machine Support
index 6232b7d307aab2c553bad3c7b6a19f7f69ac997e..4d0720ed5a906d86315971a869028011c3c3a7af 100644 (file)
@@ -258,7 +258,7 @@ static int rsnd_src_init(struct rsnd_mod *mod,
 {
        struct rsnd_src *src = rsnd_mod_to_src(mod);
 
-       clk_enable(src->clk);
+       clk_prepare_enable(src->clk);
 
        return 0;
 }
@@ -269,7 +269,7 @@ static int rsnd_src_quit(struct rsnd_mod *mod,
 {
        struct rsnd_src *src = rsnd_mod_to_src(mod);
 
-       clk_disable(src->clk);
+       clk_disable_unprepare(src->clk);
 
        return 0;
 }
index 4b7e20603dd7be8032198291ee08ed9b95de88dd..1d8387c25bd85f5b312db49e42109fdfd064815f 100644 (file)
@@ -171,7 +171,7 @@ static void rsnd_ssi_hw_start(struct rsnd_ssi *ssi,
        u32 cr;
 
        if (0 == ssi->usrcnt) {
-               clk_enable(ssi->clk);
+               clk_prepare_enable(ssi->clk);
 
                if (rsnd_dai_is_clk_master(rdai)) {
                        if (rsnd_ssi_clk_from_parent(ssi))
@@ -230,7 +230,7 @@ static void rsnd_ssi_hw_stop(struct rsnd_ssi *ssi,
                                rsnd_ssi_master_clk_stop(ssi);
                }
 
-               clk_disable(ssi->clk);
+               clk_disable_unprepare(ssi->clk);
        }
 
        dev_dbg(dev, "ssi%d hw stopped\n", rsnd_mod_id(&ssi->mod));
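
The rsnd hunks above switch from clk_enable()/clk_disable() to the combined helpers required by the common clock framework, where a clock must be prepared (which may sleep) before it can be enabled (which is atomic). A minimal sketch with a hypothetical driver clock:

#include <linux/clk.h>

static int my_clock_start(struct clk *clk)
{
        /* prepare + enable in one call; returns 0 or a negative errno */
        return clk_prepare_enable(clk);
}

static void my_clock_stop(struct clk *clk)
{
        /* reverse order: disable first, then unprepare */
        clk_disable_unprepare(clk);
}
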
index c8a780d0d057f43b08e74fc7c6d57f414e8aecde..7769b0a2bc5a5287f932d20524216f7c390fd4c7 100644 (file)
@@ -254,7 +254,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
 static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
 {
        struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
-       kfree(data->widget);
        kfree(data->wlist);
        kfree(data);
 }
index 7c43479623537af4f0d4179f4cce195cbea3e9b1..a74fba6d774353d33fac7f04b71abdd241e0218e 100644 (file)
@@ -12,8 +12,8 @@
 char debugfs_mountpoint[PATH_MAX + 1] = "/sys/kernel/debug";
 
 static const char * const debugfs_known_mountpoints[] = {
-       "/sys/kernel/debug/",
-       "/debug/",
+       "/sys/kernel/debug",
+       "/debug",
        0,
 };
 
index baec7d887da4fafeeacbda82697ecff6200c95da..b83184f2d484f59f3a888648fd3f548c0dc37d12 100644 (file)
@@ -4344,6 +4344,7 @@ static void pretty_print(struct trace_seq *s, void *data, int size, struct event
                                              format, len_arg, arg);
                                trace_seq_terminate(&p);
                                trace_seq_puts(s, p.buffer);
+                               trace_seq_destroy(&p);
                                arg = arg->next;
                                break;
                        default:
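
The added trace_seq_destroy() pairs the temporary trace_seq with its cleanup so the buffer set up by trace_seq_init() is not leaked for every printed argument. A minimal sketch of the pairing, assuming the tools/lib/traceevent API as used in this file:

#include "event-parse.h"        /* declares struct trace_seq and its helpers */

static void print_one(struct trace_seq *s, const char *str)
{
        struct trace_seq p;

        trace_seq_init(&p);
        trace_seq_printf(&p, "%s", str);
        trace_seq_terminate(&p);
        trace_seq_puts(s, p.buffer);
        trace_seq_destroy(&p);          /* releases p's buffer */
}
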
index 791c539374c726b7e4d3ad8f6f75aad575692787..feab942816343aba5023d1b6ab588322e7d45506 100644 (file)
@@ -876,8 +876,8 @@ struct event_filter {
 struct event_filter *pevent_filter_alloc(struct pevent *pevent);
 
 /* for backward compatibility */
-#define FILTER_NONE            PEVENT_ERRNO__FILTER_NOT_FOUND
-#define FILTER_NOEXIST         PEVENT_ERRNO__NO_FILTER
+#define FILTER_NONE            PEVENT_ERRNO__NO_FILTER
+#define FILTER_NOEXIST         PEVENT_ERRNO__FILTER_NOT_FOUND
 #define FILTER_MISS            PEVENT_ERRNO__FILTER_MISS
 #define FILTER_MATCH           PEVENT_ERRNO__FILTER_MATCH
 
index e96923310d5780e2fe62e45736b2511f43aa907d..895edd32930ce7283cbda2df8914ba44462fc7ef 100644 (file)
@@ -589,7 +589,7 @@ $(GTK_OBJS): $(OUTPUT)%.o: %.c $(LIB_H)
        $(QUIET_CC)$(CC) -o $@ -c -fPIC $(CFLAGS) $(GTK_CFLAGS) $<
 
 $(OUTPUT)libperf-gtk.so: $(GTK_OBJS) $(PERFLIBS)
-       $(QUIET_LINK)$(CC) -o $@ -shared $(ALL_LDFLAGS) $(filter %.o,$^) $(GTK_LIBS)
+       $(QUIET_LINK)$(CC) -o $@ -shared $(LDFLAGS) $(filter %.o,$^) $(GTK_LIBS)
 
 $(OUTPUT)builtin-help.o: builtin-help.c $(OUTPUT)common-cmds.h $(OUTPUT)PERF-CFLAGS
        $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) \
index b602ad93ce630ae3f611ee74f0bb6277c367a09b..83bc2385e6d3c2820958b279f77c4d08342b1e06 100644 (file)
@@ -23,9 +23,10 @@ static int sample_ustack(struct perf_sample *sample,
 
        sp = (unsigned long) regs[PERF_REG_X86_SP];
 
-       map = map_groups__find(&thread->mg, MAP__FUNCTION, (u64) sp);
+       map = map_groups__find(&thread->mg, MAP__VARIABLE, (u64) sp);
        if (!map) {
                pr_debug("failed to get stack map\n");
+               free(buf);
                return -1;
        }
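
The added free(buf) plugs a leak on the early-return path. A common shape for keeping every exit leak-free is a single cleanup label; a small stand-alone sketch with hypothetical names:

#include <stdlib.h>

/* hypothetical stand-in for the map lookup that can fail */
static int lookup_map(void)
{
        return 0;
}

static int sample_stack(size_t size)
{
        void *buf = malloc(size);
        int ret = -1;

        if (!buf)
                return -1;

        if (!lookup_map())
                goto out;       /* failure, but buf is still freed */

        ret = 0;                /* the success path would fill buf here */
out:
        free(buf);
        return ret;
}
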
 
index 99167bf644eaa8b376060b6b6d42977e9cd09425..60875d5c556c217d3433487d65cf66382a78d25b 100644 (file)
@@ -1,4 +1,3 @@
-
 #include <linux/linkage.h>
 
 #define AX      0
@@ -90,3 +89,10 @@ ENTRY(perf_regs_load)
        ret
 ENDPROC(perf_regs_load)
 #endif
+
+/*
+ * We need to provide a .note.GNU-stack section, saying that we do NOT
+ * want an executable stack. Otherwise the final link will assume the ELF
+ * stack should not be restricted at all and mark it RWX.
+ */
+.section .note.GNU-stack,"",@progbits
index ee21fa95ebcf60c2067b32f3e9b4575974f930ad..802cf544202b7b06720afc9abb1c9580cf362892 100644 (file)
@@ -34,6 +34,14 @@ ifeq ($(ARCH),arm)
   LIBUNWIND_LIBS = -lunwind -lunwind-arm
 endif
 
+# So far there's only x86 libdw unwind support merged in perf.
+# Disable it on all other architectures in case libdw unwind
+# support is detected on the system. Add supported architectures
+# to the check.
+ifneq ($(ARCH),x86)
+  NO_LIBDW_DWARF_UNWIND := 1
+endif
+
 ifeq ($(LIBUNWIND_LIBS),)
   NO_LIBUNWIND := 1
 else
@@ -109,6 +117,10 @@ CFLAGS += -Wall
 CFLAGS += -Wextra
 CFLAGS += -std=gnu99
 
+# Enforce a non-executable stack, as we may regress (again) in the future by
+# adding assembler files missing the .GNU-stack linker note.
+LDFLAGS += -Wl,-z,noexecstack
+
 EXTLIBS = -lelf -lpthread -lrt -lm -ldl
 
 ifneq ($(OUTPUT),)
@@ -186,7 +198,10 @@ VF_FEATURE_TESTS =                 \
        stackprotector-all              \
        timerfd                         \
        libunwind-debug-frame           \
-       bionic
+       bionic                          \
+       liberty                         \
+       liberty-z                       \
+       cplus-demangle
 
 # Set FEATURE_CHECK_(C|LD)FLAGS-all for all CORE_FEATURE_TESTS features.
 # If in the future we need per-feature checks/flags for features not
@@ -504,7 +519,21 @@ else
 endif
 
 ifeq ($(feature-libbfd), 1)
-  EXTLIBS += -lbfd -lz -liberty
+  EXTLIBS += -lbfd
+
+  # call all detections now so we get correct
+  # status in VF output
+  $(call feature_check,liberty)
+  $(call feature_check,liberty-z)
+  $(call feature_check,cplus-demangle)
+
+  ifeq ($(feature-liberty), 1)
+    EXTLIBS += -liberty
+  else
+    ifeq ($(feature-liberty-z), 1)
+      EXTLIBS += -liberty -lz
+    endif
+  endif
 endif
 
 ifdef NO_DEMANGLE
@@ -515,15 +544,10 @@ else
     CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
   else
     ifneq ($(feature-libbfd), 1)
-      $(call feature_check,liberty)
-      ifeq ($(feature-liberty), 1)
-        EXTLIBS += -lbfd -liberty
-      else
-        $(call feature_check,liberty-z)
-        ifeq ($(feature-liberty-z), 1)
-          EXTLIBS += -lbfd -liberty -lz
-        else
-          $(call feature_check,cplus-demangle)
+      ifneq ($(feature-liberty), 1)
+        ifneq ($(feature-liberty-z), 1)
+          # we have neither HAVE_CPLUS_DEMANGLE_SUPPORT
+          # nor any of the 'bfd iberty z' trinity
           ifeq ($(feature-cplus-demangle), 1)
             EXTLIBS += -liberty
             CFLAGS += -DHAVE_CPLUS_DEMANGLE_SUPPORT
index 5daeae1cb4c01b3a87f4c54ee4018199c12aeeb6..2f92d6e7ee007bea58636fe8509757c4f626d77d 100644 (file)
@@ -46,6 +46,7 @@ make_install_man    := install-man
 make_install_html   := install-html
 make_install_info   := install-info
 make_install_pdf    := install-pdf
+make_static         := LDFLAGS=-static
 
 # all the NO_* variable combined
 make_minimal        := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
@@ -87,6 +88,7 @@ run += make_install_bin
 # run += make_install_info
 # run += make_install_pdf
 run += make_minimal
+run += make_static
 
 ifneq ($(call has,ctags),)
 run += make_tags
index a53cd0b8c151cdb898d3711c36e5081846813a15..27c2a5efe4504945bf9c8492b62b8256abc0be33 100644 (file)
@@ -717,7 +717,7 @@ static char *get_kernel_version(const char *root_dir)
 }
 
 static int map_groups__set_modules_path_dir(struct map_groups *mg,
-                               const char *dir_name)
+                               const char *dir_name, int depth)
 {
        struct dirent *dent;
        DIR *dir = opendir(dir_name);
@@ -742,7 +742,15 @@ static int map_groups__set_modules_path_dir(struct map_groups *mg,
                            !strcmp(dent->d_name, ".."))
                                continue;
 
-                       ret = map_groups__set_modules_path_dir(mg, path);
+                       /* Do not follow top-level source and build symlinks */
+                       if (depth == 0) {
+                               if (!strcmp(dent->d_name, "source") ||
+                                   !strcmp(dent->d_name, "build"))
+                                       continue;
+                       }
+
+                       ret = map_groups__set_modules_path_dir(mg, path,
+                                                              depth + 1);
                        if (ret < 0)
                                goto out;
                } else {
@@ -786,11 +794,11 @@ static int machine__set_modules_path(struct machine *machine)
        if (!version)
                return -1;
 
-       snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s/kernel",
+       snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
                 machine->root_dir, version);
        free(version);
 
-       return map_groups__set_modules_path_dir(&machine->kmaps, modules_path);
+       return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
 }
 
 static int machine__create_module(void *arg, const char *name, u64 start)
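
The new depth argument lets the walk skip the "source" and "build" symlinks that module trees keep at the top level, while still descending into real subdirectories. A stand-alone sketch of the idea (walk() is illustrative, not the perf code; d_type is a Linux/glibc extension):

#include <dirent.h>
#include <limits.h>
#include <stdio.h>
#include <string.h>

static int walk(const char *dir_name, int depth)
{
        DIR *dir = opendir(dir_name);
        struct dirent *dent;

        if (!dir)
                return -1;

        while ((dent = readdir(dir)) != NULL) {
                char path[PATH_MAX];

                if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
                        continue;

                /* only the top-level "source"/"build" links are skipped */
                if (depth == 0 &&
                    (!strcmp(dent->d_name, "source") ||
                     !strcmp(dent->d_name, "build")))
                        continue;

                snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
                if (dent->d_type == DT_DIR)
                        walk(path, depth + 1);
        }

        closedir(dir);
        return 0;
}
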
index 47b29834a6b61def09f6340013cc9b2927c03cd9..56ff9bebb577df935200aacfc1e8251ae0800bcc 100644 (file)
@@ -548,11 +548,10 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
        u32 val;
        u32 *reg;
 
-       offset >>= 1;
        reg = vgic_bitmap_get_reg(&vcpu->kvm->arch.vgic.irq_cfg,
-                                 vcpu->vcpu_id, offset);
+                                 vcpu->vcpu_id, offset >> 1);
 
-       if (offset & 2)
+       if (offset & 4)
                val = *reg >> 16;
        else
                val = *reg & 0xffff;
@@ -561,13 +560,13 @@ static bool handle_mmio_cfg_reg(struct kvm_vcpu *vcpu,
        vgic_reg_access(mmio, &val, offset,
                        ACCESS_READ_VALUE | ACCESS_WRITE_VALUE);
        if (mmio->is_write) {
-               if (offset < 4) {
+               if (offset < 8) {
                        *reg = ~0U; /* Force PPIs/SGIs to 1 */
                        return false;
                }
 
                val = vgic_cfg_compress(val);
-               if (offset & 2) {
+               if (offset & 4) {
                        *reg &= 0xffff;
                        *reg |= val << 16;
                } else {
@@ -916,6 +915,7 @@ static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
        case 0:
                if (!target_cpus)
                        return;
+               break;
 
        case 1:
                target_cpus = ((1 << nrcpus) - 1) & ~(1 << vcpu_id) & 0xff;
@@ -1667,10 +1667,11 @@ static int vgic_ioaddr_assign(struct kvm *kvm, phys_addr_t *ioaddr,
        if (addr + size < addr)
                return -EINVAL;
 
+       *ioaddr = addr;
        ret = vgic_ioaddr_overlap(kvm);
        if (ret)
-               return ret;
-       *ioaddr = addr;
+               *ioaddr = VGIC_ADDR_UNDEF;
+
        return ret;
 }
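
The break added in the SGI-dispatch hunk closes an unintended fall-through from the "use the requested target list" case into the "all CPUs but self" case. The shape of the bug, with hypothetical names:

static unsigned long pick_targets(int mode, unsigned long requested,
                                  unsigned long all_but_self)
{
        unsigned long targets = 0;

        switch (mode) {
        case 0:
                targets = requested;
                break;          /* without this, case 0 falls into case 1 */
        case 1:
                targets = all_but_self;
                break;
        default:
                break;
        }

        return targets;
}
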
 
index 8db43701016f30cebeb37ab7e4d581166606b86f..bf06577fea51c22ab944edb9560e56f01aae2f94 100644 (file)
@@ -395,7 +395,8 @@ static int assigned_device_enable_host_msix(struct kvm *kvm,
        if (dev->entries_nr == 0)
                return r;
 
-       r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
+       r = pci_enable_msix_exact(dev->dev,
+                                 dev->host_msix_entries, dev->entries_nr);
        if (r)
                return r;
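
pci_enable_msix_exact() either enables exactly the requested number of vectors or fails with a negative errno; unlike the older pci_enable_msix() it never returns a positive "only N vectors available" count, so the caller keeps a simple zero/negative check. A minimal sketch:

#include <linux/pci.h>

static int my_enable_vectors(struct pci_dev *pdev,
                             struct msix_entry *entries, int nvec)
{
        int r = pci_enable_msix_exact(pdev, entries, nvec);

        if (r)          /* negative errno; no partial allocation to retry */
                return r;

        return 0;
}
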
 
index 10df100c4514e856d1ca5509f87f118e37726f57..06e6401d6ef45326edcbce4c8ff96e13286d2940 100644 (file)
@@ -101,7 +101,7 @@ static void async_pf_execute(struct work_struct *work)
        if (waitqueue_active(&vcpu->wq))
                wake_up_interruptible(&vcpu->wq);
 
-       mmdrop(mm);
+       mmput(mm);
        kvm_put_kvm(vcpu->kvm);
 }
 
@@ -118,7 +118,7 @@ void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu)
                flush_work(&work->work);
 #else
                if (cancel_work_sync(&work->work)) {
-                       mmdrop(work->mm);
+                       mmput(work->mm);
                        kvm_put_kvm(vcpu->kvm); /* == work->vcpu->kvm */
                        kmem_cache_free(async_pf_cache, work);
                }
@@ -183,7 +183,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
        work->addr = hva;
        work->arch = *arch;
        work->mm = current->mm;
-       atomic_inc(&work->mm->mm_count);
+       atomic_inc(&work->mm->mm_users);
        kvm_get_kvm(work->vcpu->kvm);
 
        /* this can't really happen otherwise gfn_to_pfn_async
@@ -201,7 +201,7 @@ int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
        return 1;
 retry_sync:
        kvm_put_kvm(work->vcpu->kvm);
-       mmdrop(work->mm);
+       mmput(work->mm);
        kmem_cache_free(async_pf_cache, work);
        return 0;
 }
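
The async_pf change switches which reference is taken on the faulting task's mm: mm_count (released with mmdrop()) only keeps the mm_struct itself allocated, while mm_users (released with mmput()) keeps the whole address space alive, which is what the deferred worker needs when it touches guest user memory. A sketch of the pairing as written in this era, before the mmget() helper existed; the worker hand-off is only indicated in a comment:

#include <linux/sched.h>        /* mmput(); struct mm_struct */

static void my_async_worker(struct mm_struct *mm)
{
        /* ... access user memory through mm here ... */
        mmput(mm);                      /* release the address-space reference */
}

static void my_queue_async_work(struct mm_struct *mm)
{
        atomic_inc(&mm->mm_users);      /* pin the address space, not just the struct */
        /* hand mm off to my_async_worker(), e.g. via a workqueue (omitted) */
}
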