Merge tag 'nfs-for-3.16-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 26 Jun 2014 03:06:06 +0000 (20:06 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 26 Jun 2014 03:06:06 +0000 (20:06 -0700)
Pull NFS client fixes from Trond Myklebust:
 "Highlights include:

   - Stable fix for a data corruption case due to incorrect cache
     validation
   - Fix a couple of false positive cache invalidations
   - Fix NFSv4 security negotiation issues"

* tag 'nfs-for-3.16-2' of git://git.linux-nfs.org/projects/trondmy/linux-nfs:
  NFSv4: test SECINFO RPC_AUTH_GSS pseudoflavors for support
  NFS Return -EPERM if no supported or matching SECINFO flavor
  NFS check the return of nfs4_negotiate_security in nfs4_submount
  NFS: Don't mark the data cache as invalid if it has been flushed
  NFS: Clear NFS_INO_REVAL_PAGECACHE when we update the file size
  nfs: Fix cache_validity check in nfs_write_pageuptodate()

127 files changed:
CREDITS
Documentation/accounting/getdelays.c
Documentation/devicetree/bindings/arm/armada-38x.txt
Documentation/kernel-parameters.txt
Documentation/memory-hotplug.txt
Documentation/sysctl/kernel.txt
Documentation/sysctl/vm.txt
MAINTAINERS
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/armada-380.dtsi
arch/arm/boot/dts/armada-385-db.dts
arch/arm/boot/dts/armada-385-rd.dts
arch/arm/boot/dts/armada-385.dtsi
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/at91sam9261.dtsi
arch/arm/boot/dts/at91sam9261ek.dts
arch/arm/boot/dts/at91sam9n12.dtsi
arch/arm/boot/dts/at91sam9x5.dtsi
arch/arm/boot/dts/exynos4.dtsi
arch/arm/boot/dts/imx51-babbage.dts
arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
arch/arm/boot/dts/imx53-m53evk.dts
arch/arm/boot/dts/imx6dl-hummingboard.dts
arch/arm/boot/dts/imx6q-gw51xx.dts
arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
arch/arm/boot/dts/imx6qdl-microsom.dtsi
arch/arm/boot/dts/imx6sl.dtsi
arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
arch/arm/boot/dts/stih415.dtsi
arch/arm/boot/dts/stih416-b2020-revE.dts [deleted file]
arch/arm/boot/dts/stih416-b2020e.dts [new file with mode: 0644]
arch/arm/boot/dts/stih416.dtsi
arch/arm/common/scoop.c
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/mvebu_v7_defconfig
arch/arm/include/asm/thread_info.h
arch/arm/kernel/perf_event_v7.c
arch/arm/mach-exynos/hotplug.c
arch/arm/mach-exynos/mcpm-exynos.c
arch/arm/mach-exynos/pm.c
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/clk-imx6sl.c
arch/arm/mach-integrator/integrator_ap.c
arch/arm/mach-integrator/integrator_cp.c
arch/arm/mach-mvebu/Kconfig
arch/arm/mach-omap2/Kconfig
arch/arm/mach-sti/Kconfig
arch/arm/mach-ux500/Kconfig
arch/arm/mach-vexpress/Kconfig
arch/arm/mm/Kconfig
arch/arm/mm/proc-arm925.S
arch/ia64/include/uapi/asm/fcntl.h
arch/powerpc/Kconfig.debug
arch/powerpc/include/asm/code-patching.h
arch/powerpc/include/asm/opal.h
arch/powerpc/include/asm/swab.h
arch/powerpc/kernel/ftrace.c
arch/powerpc/kernel/iomap.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/prom_init_check.sh
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/platforms/cell/cbe_thermal.c
arch/powerpc/platforms/powernv/Makefile
arch/powerpc/platforms/powernv/opal-takeover.S [deleted file]
arch/powerpc/sysdev/dart_iommu.c
arch/sparc/include/asm/irq_64.h
arch/sparc/kernel/process_64.c
arch/x86/include/asm/irq.h
arch/x86/kernel/apic/hw_nmi.c
drivers/base/dma-contiguous.c
drivers/block/rbd.c
drivers/clocksource/exynos_mct.c
drivers/macintosh/smu.c
drivers/memstick/host/rtsx_pci_ms.c
drivers/misc/Kconfig
drivers/tty/serial/msm_serial.c
drivers/vhost/net.c
drivers/vhost/scsi.c
fs/aio.c
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/dlm/dlmthread.c
fs/ocfs2/dlm/dlmunlock.c
fs/ocfs2/namei.c
fs/ocfs2/ocfs2_trace.h
fs/ocfs2/refcounttree.c
fs/ocfs2/super.c
include/dt-bindings/clock/imx6sl-clock.h
include/dt-bindings/clock/stih415-clks.h
include/dt-bindings/clock/stih416-clks.h
include/linux/nmi.h
include/linux/page-flags.h
include/trace/ftrace.h
include/trace/syscall.h
kernel/fork.c
kernel/kexec.c
kernel/smp.c
kernel/sysctl.c
kernel/tracepoint.c
kernel/watchdog.c
lib/Kconfig.debug
lib/lz4/lz4_decompress.c
lib/lzo/lzo1x_decompress_safe.c
mm/huge_memory.c
mm/hugetlb.c
mm/ksm.c
mm/mempolicy.c
mm/migrate.c
mm/nommu.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/slab.c
samples/trace_events/trace-events-sample.h
scripts/checkpatch.pl
tools/testing/selftests/powerpc/tm/Makefile
tools/testing/selftests/powerpc/tm/tm-resched-dscr.c

diff --git a/CREDITS b/CREDITS
index c322dcfb926d3c2d850f22af9b3c287e086bd9c2..28ee1514b9deec0d19bca725b74ca6934d719f24 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -9,6 +9,10 @@
                        Linus
 ----------
 
+M: Matt Mackal
+E: mpm@selenic.com
+D: SLOB slab allocator
+
 N: Matti Aarnio
 E: mea@nic.funet.fi
 D: Alpha systems hacking, IPv6 and other network related stuff
index c6a06b71594d4726ed6cfe5577dbaeda8cba1bb0..f40578026a04519e5e5a34ec613b36131c343444 100644 (file)
@@ -314,6 +314,7 @@ int main(int argc, char *argv[])
                        break;
                case 'm':
                        strncpy(cpumask, optarg, sizeof(cpumask));
+                       cpumask[sizeof(cpumask) - 1] = '\0';
                        maskset = 1;
                        printf("cpumask %s maskset %d\n", cpumask, maskset);
                        break;
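
The hunk above pairs strncpy() with an explicit NUL terminator so a maximum-length -m argument cannot leave the buffer unterminated. A minimal standalone sketch of the same pattern; the buffer size, function name, and main() wrapper are illustrative and not taken from getdelays.c:

#include <stdio.h>
#include <string.h>

#define MASK_SZ 32	/* hypothetical size; getdelays.c sizes its own buffer */

static char cpumask[MASK_SZ];

/* Copy an option string, guaranteeing NUL termination even when the
 * source fills or exceeds the destination buffer. */
static void set_cpumask(const char *optarg)
{
	strncpy(cpumask, optarg, sizeof(cpumask));
	cpumask[sizeof(cpumask) - 1] = '\0';
}

int main(int argc, char *argv[])
{
	set_cpumask(argc > 1 ? argv[1] : "0-3");
	printf("cpumask %s\n", cpumask);
	return 0;
}
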
index 11f2330a6554588272372148a367ca8d4a914043..ad9f8ed4d9bd7a6334907b07f4b03122ba4df018 100644 (file)
@@ -6,5 +6,15 @@ following property:
 
 Required root node property:
 
- - compatible: must contain either "marvell,armada380" or
-   "marvell,armada385" depending on the variant of the SoC being used.
+ - compatible: must contain "marvell,armada380"
+
+In addition, boards using the Marvell Armada 385 SoC shall have the
+following property before the previous one:
+
+Required root node property:
+
+compatible: must contain "marvell,armada385"
+
+Example:
+
+compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada380";
index 884904975d0b98ec126f22b0a51b6b26569d0b99..c1b9aa8c5a52e807e6458d40d96d4f1bc91f107b 100644 (file)
@@ -3130,6 +3130,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        [KNL] Should the soft-lockup detector generate panics.
                        Format: <integer>
 
+       softlockup_all_cpu_backtrace=
+                       [KNL] Should the soft-lockup detector generate
+                       backtraces on all cpus.
+                       Format: <integer>
+
        sonypi.*=       [HW] Sony Programmable I/O Control Device driver
                        See Documentation/laptops/sonypi.txt
 
index f304edb8fbe7fcd3f7297d9880c8cd763c27de2e..45134dc2385424fc3ce0ecd01ab20a221f7d8140 100644 (file)
@@ -209,15 +209,12 @@ If memory device is found, memory hotplug code will be called.
 
 4.2 Notify memory hot-add event by hand
 ------------
-On powerpc, the firmware does not notify a memory hotplug event to the kernel.
-Therefore, "probe" interface is supported to notify the event to the kernel.
-This interface depends on CONFIG_ARCH_MEMORY_PROBE.
-
-CONFIG_ARCH_MEMORY_PROBE is supported on powerpc only. On x86, this config
-option is disabled by default since ACPI notifies a memory hotplug event to
-the kernel, which performs its hotplug operation as the result. Please
-enable this option if you need the "probe" interface for testing purposes
-on x86.
+On some architectures, the firmware may not notify the kernel of a memory
+hotplug event.  Therefore, the memory "probe" interface is supported to
+explicitly notify the kernel.  This interface depends on
+CONFIG_ARCH_MEMORY_PROBE and can be configured on powerpc, sh, and x86
+if hotplug is supported, although for x86 this should be handled by ACPI
+notification.
 
 Probe interface is located at
 /sys/devices/system/memory/probe
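
For reference, the probe interface is driven by writing the physical start address of the new memory block to that file. A minimal user-space sketch, assuming a kernel built with CONFIG_ARCH_MEMORY_PROBE and a hypothetical, section-aligned address of 0x100000000:

#include <stdio.h>

int main(void)
{
	/* Requires root; the address below is only an example and must be
	 * the section-aligned physical start of the memory being added. */
	FILE *f = fopen("/sys/devices/system/memory/probe", "w");

	if (!f) {
		perror("probe");
		return 1;
	}
	fprintf(f, "0x100000000\n");
	fclose(f);
	return 0;
}
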
index 708bb7f1b7e03cf89e7338ecc042aaaa6ba19333..c14374e71775f62ba1ed72a856a702cb1d819e47 100644 (file)
@@ -75,6 +75,7 @@ show up in /proc/sys/kernel:
 - shmall
 - shmmax                      [ sysv ipc ]
 - shmmni
+- softlockup_all_cpu_backtrace
 - stop-a                      [ SPARC only ]
 - sysrq                       ==> Documentation/sysrq.txt
 - sysctl_writes_strict
@@ -783,6 +784,22 @@ via the /proc/sys interface:
 
 ==============================================================
 
+softlockup_all_cpu_backtrace:
+
+This value controls the soft lockup detector thread's behavior
+when a soft lockup condition is detected as to whether or not
+to gather further debug information. If enabled, each cpu will
+be issued an NMI and instructed to capture stack trace.
+
+This feature is only applicable for architectures which support
+NMI.
+
+0: do nothing. This is the default behavior.
+
+1: on detection capture more debug information.
+
+==============================================================
+
 tainted:
 
 Non-zero if the kernel has been tainted.  Numeric values, which
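
The new sysctl documented above can also be toggled at run time through /proc/sys. A minimal sketch, assuming a root shell context and an architecture with NMI support; the value written follows the 0/1 semantics described in the text:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* 1 = back-trace all CPUs when a soft lockup is detected, 0 = default. */
	int fd = open("/proc/sys/kernel/softlockup_all_cpu_backtrace", O_WRONLY);

	if (fd < 0) {
		perror("softlockup_all_cpu_backtrace");
		return 1;
	}
	if (write(fd, "1\n", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}
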
index bd4b34c0373878afc200173b4955724ab9899fa2..4415aa91568104fbcfa9cc594373513180a29c24 100644 (file)
@@ -702,7 +702,8 @@ The batch value of each per cpu pagelist is also updated as a result.  It is
 set to pcp->high/4.  The upper limit of batch is (PAGE_SHIFT * 8)
 
 The initial value is zero.  Kernel does not use this value at boot time to set
-the high water marks for each per cpu page list.
+the high water marks for each per cpu page list.  If the user writes '0' to this
+sysctl, it will revert to this default behavior.
 
 ==============================================================
 
index 3f2e171047b90e8a3f396a9c1266a3d5f6c220a2..3cc94fff780f4e6a4249ef7332c89b5afb4a3d58 100644 (file)
@@ -8196,13 +8196,15 @@ S:      Maintained
 F:     drivers/usb/misc/sisusbvga/
 
 SLAB ALLOCATOR
-M:     Christoph Lameter <cl@linux-foundation.org>
+M:     Christoph Lameter <cl@linux.com>
 M:     Pekka Enberg <penberg@kernel.org>
-M:     Matt Mackall <mpm@selenic.com>
+M:     David Rientjes <rientjes@google.com>
+M:     Joonsoo Kim <iamjoonsoo.kim@lge.com>
+M:     Andrew Morton <akpm@linux-foundation.org>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     include/linux/sl?b*.h
-F:     mm/sl?b.c
+F:     mm/sl?b*
 
 SLEEPABLE READ-COPY UPDATE (SRCU)
 M:     Lai Jiangshan <laijs@cn.fujitsu.com>
index 5986ff63b90195a4a1782997fcdabc1ffe5418a8..adb5ed9e269e196a55c380002d266062fd06c3b7 100644 (file)
@@ -357,7 +357,7 @@ dtb-$(CONFIG_ARCH_STI)+= stih407-b2120.dtb \
        stih415-b2020.dtb \
        stih416-b2000.dtb \
        stih416-b2020.dtb \
-       stih416-b2020-revE.dtb
+       stih416-b2020e.dtb
 dtb-$(CONFIG_MACH_SUN4I) += \
        sun4i-a10-a1000.dtb \
        sun4i-a10-cubieboard.dtb \
index e69bc6759c39d198e58e6865768ffef7a690da06..4173a8ab34e76a9d3a21bc50a73516379424ca47 100644 (file)
@@ -16,7 +16,7 @@
 
 / {
        model = "Marvell Armada 380 family SoC";
-       compatible = "marvell,armada380", "marvell,armada38x";
+       compatible = "marvell,armada380";
 
        cpus {
                #address-cells = <1>;
index 5bae4731828bd222b5f5ee0f51b5ca9b452a0119..1af886f1e4864adad3f0d210b8dbc146a468fdd7 100644 (file)
@@ -16,7 +16,7 @@
 
 / {
        model = "Marvell Armada 385 Development Board";
-       compatible = "marvell,a385-db", "marvell,armada385", "marvell,armada38x";
+       compatible = "marvell,a385-db", "marvell,armada385", "marvell,armada380";
 
        chosen {
                bootargs = "console=ttyS0,115200 earlyprintk";
index 40893255a3f0edad962ab006e6e11b716fa9a019..aaca2861dc87aaeaad0ebc7b97bb2354ea99891b 100644 (file)
@@ -17,7 +17,7 @@
 
 / {
        model = "Marvell Armada 385 Reference Design";
-       compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada38x";
+       compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada380";
 
        chosen {
                bootargs = "console=ttyS0,115200 earlyprintk";
index f011009bf4cf3ec3ce0dd411693df6c4c6bfe0b3..6283d7912f71b8e826117f3edd3bd26c67c059bc 100644 (file)
@@ -16,7 +16,7 @@
 
 / {
        model = "Marvell Armada 385 family SoC";
-       compatible = "marvell,armada385", "marvell,armada38x";
+       compatible = "marvell,armada385", "marvell,armada380";
 
        cpus {
                #address-cells = <1>;
index 3de364e81b5233b615035765b32a0f930f442a95..689fa1a467289578ccb0344b7c7550be1e48d709 100644 (file)
@@ -20,7 +20,7 @@
 
 / {
        model = "Marvell Armada 38x family SoC";
-       compatible = "marvell,armada38x";
+       compatible = "marvell,armada380";
 
        aliases {
                gpio0 = &gpio0;
index b309c1c6e848958d3af6c6679a1dc19402329402..04927db1d6bf1f8ba8053b9365bdb6404297c101 100644 (file)
                                #size-cells = <0>;
                                #interrupt-cells = <1>;
 
-                               slow_rc_osc: slow_rc_osc {
-                                       compatible = "fixed-clock";
+                               main_osc: main_osc {
+                                       compatible = "atmel,at91rm9200-clk-main-osc";
                                        #clock-cells = <0>;
-                                       clock-frequency = <32768>;
-                                       clock-accuracy = <50000000>;
-                               };
-
-                               clk32k: slck {
-                                       compatible = "atmel,at91sam9260-clk-slow";
-                                       #clock-cells = <0>;
-                                       clocks = <&slow_rc_osc &slow_xtal>;
+                                       interrupts-extended = <&pmc AT91_PMC_MOSCS>;
+                                       clocks = <&main_xtal>;
                                };
 
                                main: mainck {
                                        compatible = "atmel,at91rm9200-clk-main";
                                        #clock-cells = <0>;
-                                       interrupts-extended = <&pmc AT91_PMC_MOSCS>;
-                                       clocks = <&main_xtal>;
+                                       clocks = <&main_osc>;
                                };
 
                                plla: pllack {
                                        compatible = "atmel,at91rm9200-clk-master";
                                        #clock-cells = <0>;
                                        interrupts-extended = <&pmc AT91_PMC_MCKRDY>;
-                                       clocks = <&clk32k>, <&main>, <&plla>, <&pllb>;
+                                       clocks = <&slow_xtal>, <&main>, <&plla>, <&pllb>;
                                        atmel,clk-output-range = <0 94000000>;
                                        atmel,clk-divisors = <1 2 4 0>;
                                };
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                        interrupt-parent = <&pmc>;
-                                       clocks = <&clk32k>, <&main>, <&plla>, <&pllb>;
+                                       clocks = <&slow_xtal>, <&main>, <&plla>, <&pllb>;
 
                                        prog0: prog0 {
                                                #clock-cells = <0>;
index c6683ea8b74350b2dace53015b43d1f78b3a7762..aa35a7aec9a87017446f6653b01178267209250a 100644 (file)
                reg = <0x20000000 0x4000000>;
        };
 
+       slow_xtal {
+               clock-frequency = <32768>;
+       };
+
        main_xtal {
                clock-frequency = <18432000>;
        };
index d1b82e6635d5dc79fb399196c88473423bece9d6..287795985e32f1590090219599134f0447543cb5 100644 (file)
                                                                      <595000000 650000000 3 0>,
                                                                      <545000000 600000000 0 1>,
                                                                      <495000000 555000000 1 1>,
-                                                                     <445000000 500000000 1 2>,
-                                                                     <400000000 450000000 1 3>;
+                                                                     <445000000 500000000 2 1>,
+                                                                     <400000000 450000000 3 1>;
                                };
 
                                plladiv: plladivck {
index 1a57298636a5b6307bd27a23e42f3678b02fe63d..d6133f497207ddee93b92ba8957a2cac73ace5a9 100644 (file)
                                                                       595000000 650000000 3 0
                                                                       545000000 600000000 0 1
                                                                       495000000 555000000 1 1
-                                                                      445000000 500000000 1 2
-                                                                      400000000 450000000 1 3>;
+                                                                      445000000 500000000 2 1
+                                                                      400000000 450000000 3 1>;
                                };
 
                                plladiv: plladivck {
index b8ece4be41ca69d3cacdf797c1de6233f8ddbaa8..fbaf426d2daafc8beb505fab17c73250e7cc3a9a 100644 (file)
                compatible = "arm,cortex-a9-gic";
                #interrupt-cells = <3>;
                interrupt-controller;
-               reg = <0x10490000 0x1000>, <0x10480000 0x100>;
+               reg = <0x10490000 0x10000>, <0x10480000 0x10000>;
        };
 
        combiner: interrupt-controller@10440000 {
index 6bc3243a80d343052c915f80a5c0c794f03a17d5..181d77fa2fa68df10d46282a41415bf711462773 100644 (file)
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
-       fsl,cd-controller;
-       fsl,wp-controller;
+       cd-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
 &esdhc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc2>;
-       cd-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
                                MX51_PAD_SD1_DATA1__SD1_DATA1           0x20d5
                                MX51_PAD_SD1_DATA2__SD1_DATA2           0x20d5
                                MX51_PAD_SD1_DATA3__SD1_DATA3           0x20d5
-                               MX51_PAD_GPIO1_0__SD1_CD                0x20d5
-                               MX51_PAD_GPIO1_1__SD1_WP                0x20d5
+                               MX51_PAD_GPIO1_0__GPIO1_0               0x100
+                               MX51_PAD_GPIO1_1__GPIO1_1               0x100
                        >;
                };
 
index 75e66c9c6144ef02badcb6cafee8755418035b30..31cfb7f2b02ec141d11c3a762f26ffdb1dae1e27 100644 (file)
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1 &pinctrl_esdhc1_cd>;
-       fsl,cd-controller;
+       cd-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
 
                pinctrl_esdhc1_cd: esdhc1_cd {
                        fsl,pins = <
-                               MX51_PAD_GPIO1_0__SD1_CD 0x20d5
+                               MX51_PAD_GPIO1_0__GPIO1_0 0xd5
                        >;
                };
 
index d5d146a8b149cd14601cef1b26857002d27eec9b..c4956b0ffb3561c35151373628237769c84c474d 100644 (file)
                      <0xb0000000 0x20000000>;
        };
 
-       soc {
-               display1: display@di1 {
-                       compatible = "fsl,imx-parallel-display";
-                       interface-pix-fmt = "bgr666";
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&pinctrl_ipu_disp1>;
-
-                       display-timings {
-                               800x480p60 {
-                                       native-mode;
-                                       clock-frequency = <31500000>;
-                                       hactive = <800>;
-                                       vactive = <480>;
-                                       hfront-porch = <40>;
-                                       hback-porch = <88>;
-                                       hsync-len = <128>;
-                                       vback-porch = <33>;
-                                       vfront-porch = <9>;
-                                       vsync-len = <3>;
-                                       vsync-active = <1>;
-                               };
+       display1: display@di1 {
+               compatible = "fsl,imx-parallel-display";
+               interface-pix-fmt = "bgr666";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_ipu_disp1>;
+
+               display-timings {
+                       800x480p60 {
+                               native-mode;
+                               clock-frequency = <31500000>;
+                               hactive = <800>;
+                               vactive = <480>;
+                               hfront-porch = <40>;
+                               hback-porch = <88>;
+                               hsync-len = <128>;
+                               vback-porch = <33>;
+                               vfront-porch = <9>;
+                               vsync-len = <3>;
+                               vsync-active = <1>;
                        };
                };
 
index 5373a5f2782bedb40786a4701833b162821a1553..c8e51dd41b8f2e9f729e852ee68140ae896b3ad6 100644 (file)
                        fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0>;
                };
 
+               pinctrl_hummingboard_usbotg_id: hummingboard-usbotg-id {
+                       /*
+                        * Similar to pinctrl_usbotg_2, but we want it
+                        * pulled down for a fixed host connection.
+                        */
+                       fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
+               };
+
                pinctrl_hummingboard_usbotg_vbus: hummingboard-usbotg-vbus {
                        fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0>;
                };
 };
 
 &usbotg {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_hummingboard_usbotg_id>;
        vbus-supply = <&reg_usbotg_vbus>;
        status = "okay";
 };
index af4929aee075a1c515cf14b21f3597a1ae068bcc..0e1406e58eff1d73a3601d09858147fecbb1b9e0 100644 (file)
@@ -11,7 +11,7 @@
 
 /dts-v1/;
 #include "imx6q.dtsi"
-#include "imx6qdl-gw54xx.dtsi"
+#include "imx6qdl-gw51xx.dtsi"
 
 / {
        model = "Gateworks Ventana i.MX6 Quad GW51XX";
index 25da82a03110f62eddc0b4b79ec637c0d4ac67d6..e8e781656b3f5ec422800d731768eefb70a88ed7 100644 (file)
                pinctrl-0 = <&pinctrl_cubox_i_ir>;
        };
 
+       pwmleds {
+               compatible = "pwm-leds";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_cubox_i_pwm1>;
+
+               front {
+                       active-low;
+                       label = "imx6:red:front";
+                       max-brightness = <248>;
+                       pwms = <&pwm1 0 50000>;
+               };
+       };
+
        regulators {
                compatible = "simple-bus";
 
                        >;
                };
 
+               pinctrl_cubox_i_pwm1: cubox-i-pwm1-front-led {
+                       fsl,pins = <MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b0>;
+               };
+
                pinctrl_cubox_i_spdif: cubox-i-spdif {
                        fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
                };
                        fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x4001b0b0>;
                };
 
+               pinctrl_cubox_i_usbotg_id: cubox-i-usbotg-id {
+                       /*
+                        * The Cubox-i pulls this low, but as it's pointless
+                        * leaving it as a pull-up, even if it is just 10uA.
+                        */
+                       fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
+               };
+
                pinctrl_cubox_i_usbotg_vbus: cubox-i-usbotg-vbus {
                        fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x4001b0b0>;
                };
 };
 
 &usbotg {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_cubox_i_usbotg_id>;
        vbus-supply = <&reg_usbotg_vbus>;
        status = "okay";
 };
index 31665adcbf399ee436082d3ff3a1dc20334c9c9a..0db15af41cb10d2a919df2b2c497be7937c80f33 100644 (file)
        status = "okay";
 
        pmic: ltc3676@3c {
-               compatible = "ltc,ltc3676";
+               compatible = "lltc,ltc3676";
                reg = <0x3c>;
 
                regulators {
index 367af3ec94353a18778df02a1130ac39dc734f6a..744c8a2d81f6b715972fdd0ab77d35597c6d4d95 100644 (file)
        };
 
        pmic: ltc3676@3c {
-               compatible = "ltc,ltc3676";
+               compatible = "lltc,ltc3676";
                reg = <0x3c>;
 
                regulators {
        codec: sgtl5000@0a {
                compatible = "fsl,sgtl5000";
                reg = <0x0a>;
-               clocks = <&clks 169>;
+               clocks = <&clks 201>;
                VDDA-supply = <&reg_1p8v>;
                VDDIO-supply = <&reg_3p3v>;
        };
index c91b5a6c769bfd5f154ff41c0a6389ccb4184374..adf150c1be90bd365b3060aaee71929d4b05007b 100644 (file)
        };
 
        pmic: ltc3676@3c {
-               compatible = "ltc,ltc3676";
+               compatible = "lltc,ltc3676";
                reg = <0x3c>;
 
                regulators {
index d729d0b15f251bbf6e6a50fad6ec858856ba2b64..79eac6849d4c9d8964882d1e64435d868c2c7c01 100644 (file)
                                MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA    0x1b0b1
                        >;
                };
-
-               pinctrl_microsom_usbotg: microsom-usbotg {
-                       /*
-                        * Similar to pinctrl_usbotg_2, but we want it
-                        * pulled down for a fixed host connection.
-                        */
-                       fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
-               };
        };
 };
 
@@ -26,8 +18,3 @@
        pinctrl-0 = <&pinctrl_microsom_uart1>;
        status = "okay";
 };
-
-&usbotg {
-       pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_microsom_usbotg>;
-};
index 2d4e5285f3f36556a15c4d0b0ff6561ae3db4d2a..57d4abe03a94f55180e6c6bf01f5919657fd9408 100644 (file)
                                compatible = "fsl,imx6sl-fec", "fsl,imx25-fec";
                                reg = <0x02188000 0x4000>;
                                interrupts = <0 114 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX6SL_CLK_ENET_REF>,
+                               clocks = <&clks IMX6SL_CLK_ENET>,
                                         <&clks IMX6SL_CLK_ENET_REF>;
                                clock-names = "ipg", "ahb";
                                status = "disabled";
index c5a1fc75c7a3b43778e0f8634776a2d6a2150aed..b2d9834bf4584e651b442166568c59e8c981608b 100644 (file)
                compatible = "ethernet-phy-id0141.0cb0",
                             "ethernet-phy-ieee802.3-c22";
                reg = <0>;
-               phy-connection-type = "rgmii-id";
        };
 
        ethphy1: ethernet-phy@1 {
                compatible = "ethernet-phy-id0141.0cb0",
                             "ethernet-phy-ieee802.3-c22";
                reg = <1>;
-               phy-connection-type = "rgmii-id";
        };
 };
 
        status = "okay";
        ethernet0-port@0 {
                phy-handle = <&ethphy0>;
+               phy-connection-type = "rgmii-id";
        };
 };
 
        status = "okay";
        ethernet1-port@0 {
                phy-handle = <&ethphy1>;
+               phy-connection-type = "rgmii-id";
        };
 };
index d6f254f302fe84c9a8c6ef857f5d64c1912fa2d7..a0f6f75fe3b558d6cb32dbfb1c03f2fb121c45ac 100644 (file)
 
                        pinctrl-names   = "default";
                        pinctrl-0       = <&pinctrl_mii0>;
-                       clock-names     = "stmmaceth";
-                       clocks          = <&clk_s_a1_ls CLK_GMAC0_PHY>;
+                       clock-names     = "stmmaceth", "sti-ethclk";
+                       clocks          = <&clk_s_a1_ls CLK_ICN_IF_2>, <&clk_s_a1_ls CLK_GMAC0_PHY>;
                };
 
                ethernet1: dwmac@fef08000 {
                        reset-names             = "stmmaceth";
                        pinctrl-names   = "default";
                        pinctrl-0       = <&pinctrl_mii1>;
-                       clock-names     = "stmmaceth";
-                       clocks          = <&clk_s_a0_ls CLK_ETH1_PHY>;
+                       clock-names     = "stmmaceth", "sti-ethclk";
+                       clocks          = <&clk_s_a0_ls CLK_ICN_REG>, <&clk_s_a0_ls CLK_ETH1_PHY>;
                };
 
                rc: rc@fe518000 {
diff --git a/arch/arm/boot/dts/stih416-b2020-revE.dts b/arch/arm/boot/dts/stih416-b2020-revE.dts
deleted file mode 100644 (file)
index ba0fa2c..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2014 STMicroelectronics (R&D) Limited.
- * Author: Lee Jones <lee.jones@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * publishhed by the Free Software Foundation.
- */
-/dts-v1/;
-#include "stih416.dtsi"
-#include "stih41x-b2020.dtsi"
-/ {
-       model = "STiH416 B2020 REV-E";
-       compatible = "st,stih416-b2020", "st,stih416";
-
-       soc {
-               leds {
-                       compatible = "gpio-leds";
-                       red {
-                               #gpio-cells             = <1>;
-                               label                   = "Front Panel LED";
-                               gpios                   = <&PIO4 1>;
-                               linux,default-trigger   = "heartbeat";
-                       };
-                       green {
-                               gpios                   = <&PIO1 3>;
-                               default-state           = "off";
-                       };
-               };
-
-               ethernet1: dwmac@fef08000 {
-                       snps,reset-gpio = <&PIO0 7>;
-               };
-       };
-};
diff --git a/arch/arm/boot/dts/stih416-b2020e.dts b/arch/arm/boot/dts/stih416-b2020e.dts
new file mode 100644 (file)
index 0000000..ba0fa2c
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2014 STMicroelectronics (R&D) Limited.
+ * Author: Lee Jones <lee.jones@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * publishhed by the Free Software Foundation.
+ */
+/dts-v1/;
+#include "stih416.dtsi"
+#include "stih41x-b2020.dtsi"
+/ {
+       model = "STiH416 B2020 REV-E";
+       compatible = "st,stih416-b2020", "st,stih416";
+
+       soc {
+               leds {
+                       compatible = "gpio-leds";
+                       red {
+                               #gpio-cells             = <1>;
+                               label                   = "Front Panel LED";
+                               gpios                   = <&PIO4 1>;
+                               linux,default-trigger   = "heartbeat";
+                       };
+                       green {
+                               gpios                   = <&PIO1 3>;
+                               default-state           = "off";
+                       };
+               };
+
+               ethernet1: dwmac@fef08000 {
+                       snps,reset-gpio = <&PIO0 7>;
+               };
+       };
+};
index 06473c5d9ea989493f9675e857a1efa538ac32c7..84758d76d064f5bc790cc0b08629e48bd8fec7ea 100644 (file)
                        reset-names             = "stmmaceth";
                        pinctrl-names   = "default";
                        pinctrl-0       = <&pinctrl_mii0>;
-                       clock-names     = "stmmaceth";
-                       clocks          = <&clk_s_a1_ls CLK_GMAC0_PHY>;
+                       clock-names     = "stmmaceth", "sti-ethclk";
+                       clocks          = <&clk_s_a1_ls CLK_ICN_IF_2>, <&clk_s_a1_ls CLK_GMAC0_PHY>;
                };
 
                ethernet1: dwmac@fef08000 {
                        reset-names     = "stmmaceth";
                        pinctrl-names   = "default";
                        pinctrl-0       = <&pinctrl_mii1>;
-                       clock-names     = "stmmaceth";
-                       clocks          = <&clk_s_a0_ls CLK_ETH1_PHY>;
+                       clock-names     = "stmmaceth", "sti-ethclk";
+                       clocks          = <&clk_s_a0_ls CLK_ICN_REG>, <&clk_s_a0_ls CLK_ETH1_PHY>;
                };
 
                rc: rc@fe518000 {
index 6ef146edd0cd24849757ab82026c6e33ebf9a199..a20fa80776d3d873c6ef8ef849a8ac2b214b2776 100644 (file)
@@ -182,7 +182,6 @@ static int scoop_probe(struct platform_device *pdev)
        struct scoop_config *inf;
        struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        int ret;
-       int temp;
 
        if (!mem)
                return -EINVAL;
index ef8815327e5b55d421f4fd3012e2dc43e850562f..59b7e45142d80931c5e46882188c524530d4b22a 100644 (file)
@@ -186,6 +186,7 @@ CONFIG_VIDEO_MX3=y
 CONFIG_V4L_MEM2MEM_DRIVERS=y
 CONFIG_VIDEO_CODA=y
 CONFIG_SOC_CAMERA_OV2640=y
+CONFIG_IMX_IPUV3_CORE=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
index 17d9462b9fb9e188cceead9b7c43ef9a3500a1fa..be1a3455a9fe7e17e8698d85865e24071140f345 100644 (file)
@@ -353,6 +353,7 @@ CONFIG_MFD_NVEC=y
 CONFIG_KEYBOARD_NVEC=y
 CONFIG_SERIO_NVEC_PS2=y
 CONFIG_NVEC_POWER=y
+CONFIG_QCOM_GSBI=y
 CONFIG_COMMON_CLK_QCOM=y
 CONFIG_MSM_GCC_8660=y
 CONFIG_MSM_MMCC_8960=y
index e11170e3744248bdc4e07fb004a10d49c2c7c494..b0bfefa23902c01fc1faa6495677ff27a0e86877 100644 (file)
@@ -14,6 +14,7 @@ CONFIG_MACH_ARMADA_370=y
 CONFIG_MACH_ARMADA_375=y
 CONFIG_MACH_ARMADA_38X=y
 CONFIG_MACH_ARMADA_XP=y
+CONFIG_MACH_DOVE=y
 CONFIG_NEON=y
 # CONFIG_CACHE_L2X0 is not set
 # CONFIG_SWP_EMULATE is not set
@@ -52,6 +53,7 @@ CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_I2C=y
 CONFIG_SPI=y
 CONFIG_SPI_ORION=y
index f989d7c22dc5ac00c17d3f8ab0b6d3a74565e062..e4e4208a913037303e24a4c35bcb794b773e21fc 100644 (file)
@@ -114,8 +114,14 @@ static inline struct thread_info *current_thread_info(void)
        ((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
 #define thread_saved_sp(tsk)   \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.sp))
+
+#ifndef CONFIG_THUMB2_KERNEL
 #define thread_saved_fp(tsk)   \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
+#else
+#define thread_saved_fp(tsk)   \
+       ((unsigned long)(task_thread_info(tsk)->cpu_context.r7))
+#endif
 
 extern void crunch_task_disable(struct thread_info *);
 extern void crunch_task_copy(struct thread_info *, void *);
index 2037f72059874260558cbcb205848349ce6765a4..1d37568c547aefa9d6ae883805e980def09316d0 100644 (file)
@@ -1924,7 +1924,7 @@ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                   struct perf_event *event)
 {
        int idx;
-       int bit;
+       int bit = -1;
        unsigned int prefix;
        unsigned int region;
        unsigned int code;
@@ -1953,7 +1953,7 @@ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
        }
 
        idx = armv7pmu_get_event_idx(cpuc, event);
-       if (idx < 0 && krait_event)
+       if (idx < 0 && bit >= 0)
                clear_bit(bit, cpuc->used_mask);
 
        return idx;
index 69fa483973943a2ad6373e2c6f1c2e7063e1c802..8a134d019cb3af0ab7d792ae3ca177dd039df95d 100644 (file)
@@ -46,13 +46,7 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
                if (cpu == 1)
                        exynos_cpu_power_down(cpu);
 
-               /*
-                * here's the WFI
-                */
-               asm(".word      0xe320f003\n"
-                   :
-                   :
-                   : "memory", "cc");
+               wfi();
 
                if (pen_release == cpu_logical_map(cpu)) {
                        /*
index 0498d0b887eff0d0b5e9d9273a3f94b0d46d7255..ace0ed617476ec113ab431145fc11fa2dfa425af 100644 (file)
@@ -25,7 +25,6 @@
 
 #define EXYNOS5420_CPUS_PER_CLUSTER    4
 #define EXYNOS5420_NR_CLUSTERS         2
-#define MCPM_BOOT_ADDR_OFFSET          0x1c
 
 /*
  * The common v7_exit_coherency_flush API could not be used because of the
@@ -343,11 +342,13 @@ static int __init exynos_mcpm_init(void)
        pr_info("Exynos MCPM support installed\n");
 
        /*
-        * Future entries into the kernel can now go
-        * through the cluster entry vectors.
+        * U-Boot SPL is hardcoded to jump to the start of ns_sram_base_addr
+        * as part of secondary_cpu_start().  Let's redirect it to the
+        * mcpm_entry_point().
         */
-       __raw_writel(virt_to_phys(mcpm_entry_point),
-                       ns_sram_base_addr + MCPM_BOOT_ADDR_OFFSET);
+       __raw_writel(0xe59f0000, ns_sram_base_addr);     /* ldr r0, [pc, #0] */
+       __raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx  r0 */
+       __raw_writel(virt_to_phys(mcpm_entry_point), ns_sram_base_addr + 8);
 
        iounmap(ns_sram_base_addr);
 
index 87c0d34c7fbab3625386667cfe6ede06ab9fb0c8..202ca73e49c4f257e65bd201e9bf511cf979ee6f 100644 (file)
@@ -300,7 +300,7 @@ static int exynos_pm_suspend(void)
        tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0);
        __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
 
-       if (!soc_is_exynos5250())
+       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
                exynos_cpu_save_register();
 
        return 0;
@@ -334,7 +334,7 @@ static void exynos_pm_resume(void)
        if (exynos_pm_central_resume())
                goto early_wakeup;
 
-       if (!soc_is_exynos5250())
+       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
                exynos_cpu_restore_register();
 
        /* For release retention */
@@ -353,7 +353,7 @@ static void exynos_pm_resume(void)
 
        s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
 
-       if (!soc_is_exynos5250())
+       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
                scu_enable(S5P_VA_SCU);
 
 early_wakeup:
@@ -440,15 +440,18 @@ static int exynos_cpu_pm_notifier(struct notifier_block *self,
        case CPU_PM_ENTER:
                if (cpu == 0) {
                        exynos_pm_central_suspend();
-                       exynos_cpu_save_register();
+                       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
+                               exynos_cpu_save_register();
                }
                break;
 
        case CPU_PM_EXIT:
                if (cpu == 0) {
-                       if (!soc_is_exynos5250())
+                       if (read_cpuid_part_number() ==
+                                       ARM_CPU_PART_CORTEX_A9) {
                                scu_enable(S5P_VA_SCU);
-                       exynos_cpu_restore_register();
+                               exynos_cpu_restore_register();
+                       }
                        exynos_pm_central_resume();
                }
                break;
index 28fa2fa49e5dc4d8a4490377c62bd62324018d15..4b5185748f744a47b7742d66a59bda80715d9b90 100644 (file)
@@ -734,9 +734,9 @@ config SOC_IMX6
        select HAVE_IMX_MMDC
        select HAVE_IMX_SRC
        select MFD_SYSCON
-       select PL310_ERRATA_588369 if CACHE_PL310
-       select PL310_ERRATA_727915 if CACHE_PL310
-       select PL310_ERRATA_769419 if CACHE_PL310
+       select PL310_ERRATA_588369 if CACHE_L2X0
+       select PL310_ERRATA_727915 if CACHE_L2X0
+       select PL310_ERRATA_769419 if CACHE_L2X0
 
 config SOC_IMX6Q
        bool "i.MX6 Quad/DualLite support"
@@ -771,9 +771,9 @@ config SOC_VF610
        select ARM_GIC
        select PINCTRL_VF610
        select VF_PIT_TIMER
-       select PL310_ERRATA_588369 if CACHE_PL310
-       select PL310_ERRATA_727915 if CACHE_PL310
-       select PL310_ERRATA_769419 if CACHE_PL310
+       select PL310_ERRATA_588369 if CACHE_L2X0
+       select PL310_ERRATA_727915 if CACHE_L2X0
+       select PL310_ERRATA_769419 if CACHE_L2X0
 
        help
          This enable support for Freescale Vybrid VF610 processor.
index 21cf06cebade559b59b787e88b4c8876822d2f59..5408ca70c8d62ca01cc9d7dbf0fb331b51edec32 100644 (file)
@@ -312,6 +312,7 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
        clks[IMX6SL_CLK_ECSPI2]       = imx_clk_gate2("ecspi2",       "ecspi_root",        base + 0x6c, 2);
        clks[IMX6SL_CLK_ECSPI3]       = imx_clk_gate2("ecspi3",       "ecspi_root",        base + 0x6c, 4);
        clks[IMX6SL_CLK_ECSPI4]       = imx_clk_gate2("ecspi4",       "ecspi_root",        base + 0x6c, 6);
+       clks[IMX6SL_CLK_ENET]         = imx_clk_gate2("enet",         "ipg",               base + 0x6c, 10);
        clks[IMX6SL_CLK_EPIT1]        = imx_clk_gate2("epit1",        "perclk",            base + 0x6c, 12);
        clks[IMX6SL_CLK_EPIT2]        = imx_clk_gate2("epit2",        "perclk",            base + 0x6c, 14);
        clks[IMX6SL_CLK_EXTERN_AUDIO] = imx_clk_gate2("extern_audio", "extern_audio_podf", base + 0x6c, 16);
index dd0cc677d5960bf2b82d0821ca1b15582f60f4b3..660ca6feff4024fe8cd51bea46463be542ebf38e 100644 (file)
@@ -480,25 +480,18 @@ static const struct of_device_id ebi_match[] = {
 static void __init ap_init_of(void)
 {
        unsigned long sc_dec;
-       struct device_node *root;
        struct device_node *syscon;
        struct device_node *ebi;
        struct device *parent;
        struct soc_device *soc_dev;
        struct soc_device_attribute *soc_dev_attr;
        u32 ap_sc_id;
-       int err;
        int i;
 
-       /* Here we create an SoC device for the root node */
-       root = of_find_node_by_path("/");
-       if (!root)
-               return;
-
-       syscon = of_find_matching_node(root, ap_syscon_match);
+       syscon = of_find_matching_node(NULL, ap_syscon_match);
        if (!syscon)
                return;
-       ebi = of_find_matching_node(root, ebi_match);
+       ebi = of_find_matching_node(NULL, ebi_match);
        if (!ebi)
                return;
 
@@ -509,19 +502,17 @@ static void __init ap_init_of(void)
        if (!ebi_base)
                return;
 
+       of_platform_populate(NULL, of_default_bus_match_table,
+                       ap_auxdata_lookup, NULL);
+
        ap_sc_id = readl(ap_syscon_base);
 
        soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
        if (!soc_dev_attr)
                return;
 
-       err = of_property_read_string(root, "compatible",
-                                     &soc_dev_attr->soc_id);
-       if (err)
-               return;
-       err = of_property_read_string(root, "model", &soc_dev_attr->machine);
-       if (err)
-               return;
+       soc_dev_attr->soc_id = "XVC";
+       soc_dev_attr->machine = "Integrator/AP";
        soc_dev_attr->family = "Integrator";
        soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c",
                                           'A' + (ap_sc_id & 0x0f));
@@ -536,9 +527,6 @@ static void __init ap_init_of(void)
        parent = soc_device_to_device(soc_dev);
        integrator_init_sysfs(parent, ap_sc_id);
 
-       of_platform_populate(root, of_default_bus_match_table,
-                       ap_auxdata_lookup, parent);
-
        sc_dec = readl(ap_syscon_base + INTEGRATOR_SC_DEC_OFFSET);
        for (i = 0; i < 4; i++) {
                struct lm_device *lmdev;
index a938242b0c95ce78e17296f7382a086ae49785fa..0e57f8f820a54ec040270449e8d16487c7a8edc9 100644 (file)
@@ -279,20 +279,13 @@ static const struct of_device_id intcp_syscon_match[] = {
 
 static void __init intcp_init_of(void)
 {
-       struct device_node *root;
        struct device_node *cpcon;
        struct device *parent;
        struct soc_device *soc_dev;
        struct soc_device_attribute *soc_dev_attr;
        u32 intcp_sc_id;
-       int err;
 
-       /* Here we create an SoC device for the root node */
-       root = of_find_node_by_path("/");
-       if (!root)
-               return;
-
-       cpcon = of_find_matching_node(root, intcp_syscon_match);
+       cpcon = of_find_matching_node(NULL, intcp_syscon_match);
        if (!cpcon)
                return;
 
@@ -300,19 +293,17 @@ static void __init intcp_init_of(void)
        if (!intcp_con_base)
                return;
 
+       of_platform_populate(NULL, of_default_bus_match_table,
+                            intcp_auxdata_lookup, NULL);
+
        intcp_sc_id = readl(intcp_con_base);
 
        soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
        if (!soc_dev_attr)
                return;
 
-       err = of_property_read_string(root, "compatible",
-                                     &soc_dev_attr->soc_id);
-       if (err)
-               return;
-       err = of_property_read_string(root, "model", &soc_dev_attr->machine);
-       if (err)
-               return;
+       soc_dev_attr->soc_id = "XCV";
+       soc_dev_attr->machine = "Integrator/CP";
        soc_dev_attr->family = "Integrator";
        soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c",
                                           'A' + (intcp_sc_id & 0x0f));
@@ -326,8 +317,6 @@ static void __init intcp_init_of(void)
 
        parent = soc_device_to_device(soc_dev);
        integrator_init_sysfs(parent, intcp_sc_id);
-       of_platform_populate(root, of_default_bus_match_table,
-                       intcp_auxdata_lookup, parent);
 }
 
 static const char * intcp_dt_board_compat[] = {
index 4a7c250c9a3036533ca9c62f36f4ee7cf2b88756..b9bc599a5fd04fa8bc4b7b6e13ad1073fb4c513a 100644 (file)
@@ -10,6 +10,7 @@ menuconfig ARCH_MVEBU
        select ZONE_DMA if ARM_LPAE
        select ARCH_REQUIRE_GPIOLIB
        select PCI_QUIRKS if PCI
+       select OF_ADDRESS_PCI
 
 if ARCH_MVEBU
 
@@ -17,6 +18,7 @@ config MACH_MVEBU_V7
        bool
        select ARMADA_370_XP_TIMER
        select CACHE_L2X0
+       select ARM_CPU_SUSPEND
 
 config MACH_ARMADA_370
        bool "Marvell Armada 370 boards" if ARCH_MULTI_V7
index 062505345c953c80391226d0cc94ee3a89f0300f..1c1ed737f7ab763437c81a8223fd1a05fc2850a5 100644 (file)
@@ -34,8 +34,8 @@ config ARCH_OMAP4
        select HAVE_ARM_SCU if SMP
        select HAVE_ARM_TWD if SMP
        select OMAP_INTERCONNECT
-       select PL310_ERRATA_588369
-       select PL310_ERRATA_727915
+       select PL310_ERRATA_588369 if CACHE_L2X0
+       select PL310_ERRATA_727915 if CACHE_L2X0
        select PM_OPP if PM
        select PM_RUNTIME if CPU_IDLE
        select ARM_ERRATA_754322
index 7e33e9d2c42e73e468209b361bde1eaac48b62c7..878e9ec97d0fc810249628533c3ff4acc2e11d15 100644 (file)
@@ -11,8 +11,8 @@ menuconfig ARCH_STI
        select ARM_ERRATA_754322
        select ARM_ERRATA_764369 if SMP
        select ARM_ERRATA_775420
-       select PL310_ERRATA_753970 if CACHE_PL310
-       select PL310_ERRATA_769419 if CACHE_PL310
+       select PL310_ERRATA_753970 if CACHE_L2X0
+       select PL310_ERRATA_769419 if CACHE_L2X0
        help
          Include support for STiH41x SOCs like STiH415/416 using the device tree
          for discovery
index 5be7c4583a9348855689c94829b7b6f96e71c02a..699e8601dbf0bb935e2edb5887c998a1a1228242 100644 (file)
@@ -15,7 +15,7 @@ menuconfig ARCH_U8500
        select PINCTRL
        select PINCTRL_ABX500
        select PINCTRL_NOMADIK
-       select PL310_ERRATA_753970 if CACHE_PL310
+       select PL310_ERRATA_753970 if CACHE_L2X0
        help
          Support for ST-Ericsson's Ux500 architecture
 
index 99c1f151c403c07cdeb5b1f575d65a64594d6d60..d8b9330f896a3edac20a11355d8b60b9a613f824 100644 (file)
@@ -43,7 +43,7 @@ config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
        bool "Enable A5 and A9 only errata work-arounds"
        default y
        select ARM_ERRATA_720789
-       select PL310_ERRATA_753970 if CACHE_PL310
+       select PL310_ERRATA_753970 if CACHE_L2X0
        help
          Provides common dependencies for Versatile Express platforms
          based on Cortex-A5 and Cortex-A9 processors. In order to
index eda0dd0ab97bf02bc6759b2018ae05818d4006c3..c348eaee7ee29df402ce9103a94174cbb1883213 100644 (file)
@@ -889,9 +889,10 @@ config CACHE_L2X0
        help
          This option enables the L2x0 PrimeCell.
 
+if CACHE_L2X0
+
 config CACHE_PL310
        bool
-       depends on CACHE_L2X0
        default y if CPU_V7 && !(CPU_V6 || CPU_V6K)
        help
          This option enables optimisations for the PL310 cache
@@ -899,7 +900,6 @@ config CACHE_PL310
 
 config PL310_ERRATA_588369
        bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
-       depends on CACHE_L2X0
        help
           The PL310 L2 cache controller implements three types of Clean &
           Invalidate maintenance operations: by Physical Address
@@ -912,7 +912,6 @@ config PL310_ERRATA_588369
 
 config PL310_ERRATA_727915
        bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
-       depends on CACHE_L2X0
        help
          PL310 implements the Clean & Invalidate by Way L2 cache maintenance
          operation (offset 0x7FC). This operation runs in background so that
@@ -923,7 +922,6 @@ config PL310_ERRATA_727915
 
 config PL310_ERRATA_753970
        bool "PL310 errata: cache sync operation may be faulty"
-       depends on CACHE_PL310
        help
          This option enables the workaround for the 753970 PL310 (r3p0) erratum.
 
@@ -938,7 +936,6 @@ config PL310_ERRATA_753970
 
 config PL310_ERRATA_769419
        bool "PL310 errata: no automatic Store Buffer drain"
-       depends on CACHE_L2X0
        help
          On revisions of the PL310 prior to r3p2, the Store Buffer does
          not automatically drain. This can cause normal, non-cacheable
@@ -948,6 +945,8 @@ config PL310_ERRATA_769419
          on systems with an outer cache, the store buffer is drained
          explicitly.
 
+endif
+
 config CACHE_TAUROS2
        bool "Enable the Tauros2 L2 cache controller"
        depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4)
index 97448c3acf38a571cb96d1c9871952d56c605343..ba0d58e1a2a2bf7ede98394c741f58e0aab63dcb 100644 (file)
@@ -502,6 +502,7 @@ __\name\()_proc_info:
        .long   \cpu_val
        .long   \cpu_mask
        .long   PMD_TYPE_SECT | \
+               PMD_SECT_CACHEABLE | \
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
index 1dd275dc8f653a0b6be00f469998bfa8711044a0..7b485876cad495ac02e70bbcfb5b06f411a797c8 100644 (file)
@@ -8,6 +8,7 @@
 #define force_o_largefile()    \
                (personality(current->personality) != PER_LINUX32)
 
+#include <linux/personality.h>
 #include <asm-generic/fcntl.h>
 
 #endif /* _ASM_IA64_FCNTL_H */
index 790352f937004fde97c2430c67d0823fc382d808..35d16bd2760b745d1fa6c8d97093a8eb4a381b3e 100644 (file)
@@ -303,7 +303,6 @@ config PPC_EARLY_DEBUG_OPAL_VTERMNO
          This correspond to which /dev/hvcN you want to use for early
          debug.
 
-         On OPAL v1 (takeover) this should always be 0
          On OPAL v2, this will be 0 for network console and 1 or 2 for
          the machine built-in serial ports.
 
index 37991e154ef88faf50b7b8b447c1772877a253bf..840a5509b3f19b37f5dd1d92f43718c568d49453 100644 (file)
@@ -88,4 +88,15 @@ static inline unsigned long ppc_function_entry(void *func)
 #endif
 }
 
+static inline unsigned long ppc_global_function_entry(void *func)
+{
+#if defined(CONFIG_PPC64) && defined(_CALL_ELF) && _CALL_ELF == 2
+       /* PPC64 ABIv2 the global entry point is at the address */
+       return (unsigned long)func;
+#else
+       /* All other cases there is no change vs ppc_function_entry() */
+       return ppc_function_entry(func);
+#endif
+}
+
 #endif /* _ASM_POWERPC_CODE_PATCHING_H */
index 460018889ba9b228c723855557ed338d1b5a6847..0da1dbd42e02123c44007347fd0d4f89e2796b14 100644 (file)
 #ifndef __OPAL_H
 #define __OPAL_H
 
-/****** Takeover interface ********/
-
-/* PAPR H-Call used to querty the HAL existence and/or instanciate
- * it from within pHyp (tech preview only).
- *
- * This is exclusively used in prom_init.c
- */
-
 #ifndef __ASSEMBLY__
-
-struct opal_takeover_args {
-       u64     k_image;                /* r4 */
-       u64     k_size;                 /* r5 */
-       u64     k_entry;                /* r6 */
-       u64     k_entry2;               /* r7 */
-       u64     hal_addr;               /* r8 */
-       u64     rd_image;               /* r9 */
-       u64     rd_size;                /* r10 */
-       u64     rd_loc;                 /* r11 */
-};
-
 /*
  * SG entry
  *
@@ -55,15 +35,6 @@ struct opal_sg_list {
 /* We calculate number of sg entries based on PAGE_SIZE */
 #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
 
-extern long opal_query_takeover(u64 *hal_size, u64 *hal_align);
-
-extern long opal_do_takeover(struct opal_takeover_args *args);
-
-struct rtas_args;
-extern int opal_enter_rtas(struct rtas_args *args,
-                          unsigned long data,
-                          unsigned long entry);
-
 #endif /* __ASSEMBLY__ */
 
 /****** OPAL APIs ******/
index b9bd1ca944d086706f15218d9b80f695aff5fb2b..96f59de61855335214aa8e7ae73e07bdee9b1ae3 100644 (file)
@@ -9,10 +9,6 @@
 
 #include <uapi/asm/swab.h>
 
-#ifdef __GNUC__
-#ifndef __powerpc64__
-#endif /* __powerpc64__ */
-
 static __inline__ __u16 ld_le16(const volatile __u16 *addr)
 {
        __u16 val;
@@ -20,19 +16,12 @@ static __inline__ __u16 ld_le16(const volatile __u16 *addr)
        __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
        return val;
 }
-#define __arch_swab16p ld_le16
 
 static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
 {
        __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
 }
 
-static inline void __arch_swab16s(__u16 *addr)
-{
-       st_le16(addr, *addr);
-}
-#define __arch_swab16s __arch_swab16s
-
 static __inline__ __u32 ld_le32(const volatile __u32 *addr)
 {
        __u32 val;
@@ -40,42 +29,10 @@ static __inline__ __u32 ld_le32(const volatile __u32 *addr)
        __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
        return val;
 }
-#define __arch_swab32p ld_le32
 
 static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
 {
        __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
 }
 
-static inline void __arch_swab32s(__u32 *addr)
-{
-       st_le32(addr, *addr);
-}
-#define __arch_swab32s __arch_swab32s
-
-static inline __attribute_const__ __u16 __arch_swab16(__u16 value)
-{
-       __u16 result;
-
-       __asm__("rlwimi %0,%1,8,16,23"
-           : "=r" (result)
-           : "r" (value), "0" (value >> 8));
-       return result;
-}
-#define __arch_swab16 __arch_swab16
-
-static inline __attribute_const__ __u32 __arch_swab32(__u32 value)
-{
-       __u32 result;
-
-       __asm__("rlwimi %0,%1,24,16,23\n\t"
-           "rlwimi %0,%1,8,8,15\n\t"
-           "rlwimi %0,%1,24,0,7"
-           : "=r" (result)
-           : "r" (value), "0" (value >> 24));
-       return result;
-}
-#define __arch_swab32 __arch_swab32
-
-#endif /* __GNUC__ */
 #endif /* _ASM_POWERPC_SWAB_H */
index f202d0731b065466ba8151df98b11cd6a3cd720c..d178834fe508443816ea4f96481c530a9517c977 100644 (file)
@@ -10,6 +10,8 @@
  *
  */
 
+#define pr_fmt(fmt) "ftrace-powerpc: " fmt
+
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
 #include <linux/uaccess.h>
@@ -105,7 +107,7 @@ __ftrace_make_nop(struct module *mod,
                  struct dyn_ftrace *rec, unsigned long addr)
 {
        unsigned int op;
-       unsigned long ptr;
+       unsigned long entry, ptr;
        unsigned long ip = rec->ip;
        void *tramp;
 
@@ -115,7 +117,7 @@ __ftrace_make_nop(struct module *mod,
 
        /* Make sure that this is still a 24bit jump */
        if (!is_bl_op(op)) {
-               printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
+               pr_err("Not expected bl: opcode is %x\n", op);
                return -EINVAL;
        }
 
@@ -125,21 +127,21 @@ __ftrace_make_nop(struct module *mod,
        pr_devel("ip:%lx jumps to %p", ip, tramp);
 
        if (!is_module_trampoline(tramp)) {
-               printk(KERN_ERR "Not a trampoline\n");
+               pr_err("Not a trampoline\n");
                return -EINVAL;
        }
 
        if (module_trampoline_target(mod, tramp, &ptr)) {
-               printk(KERN_ERR "Failed to get trampoline target\n");
+               pr_err("Failed to get trampoline target\n");
                return -EFAULT;
        }
 
        pr_devel("trampoline target %lx", ptr);
 
+       entry = ppc_global_function_entry((void *)addr);
        /* This should match what was called */
-       if (ptr != ppc_function_entry((void *)addr)) {
-               printk(KERN_ERR "addr %lx does not match expected %lx\n",
-                       ptr, ppc_function_entry((void *)addr));
+       if (ptr != entry) {
+               pr_err("addr %lx does not match expected %lx\n", ptr, entry);
                return -EINVAL;
        }
 
@@ -179,7 +181,7 @@ __ftrace_make_nop(struct module *mod,
 
        /* Make sure that this is still a 24bit jump */
        if (!is_bl_op(op)) {
-               printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
+               pr_err("Not expected bl: opcode is %x\n", op);
                return -EINVAL;
        }
 
@@ -198,7 +200,7 @@ __ftrace_make_nop(struct module *mod,
 
        /* Find where the trampoline jumps to */
        if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
-               printk(KERN_ERR "Failed to read %lx\n", tramp);
+               pr_err("Failed to read %lx\n", tramp);
                return -EFAULT;
        }
 
@@ -209,7 +211,7 @@ __ftrace_make_nop(struct module *mod,
            ((jmp[1] & 0xffff0000) != 0x398c0000) ||
            (jmp[2] != 0x7d8903a6) ||
            (jmp[3] != 0x4e800420)) {
-               printk(KERN_ERR "Not a trampoline\n");
+               pr_err("Not a trampoline\n");
                return -EINVAL;
        }
 
@@ -221,8 +223,7 @@ __ftrace_make_nop(struct module *mod,
        pr_devel(" %lx ", tramp);
 
        if (tramp != addr) {
-               printk(KERN_ERR
-                      "Trampoline location %08lx does not match addr\n",
+               pr_err("Trampoline location %08lx does not match addr\n",
                       tramp);
                return -EINVAL;
        }
@@ -263,15 +264,13 @@ int ftrace_make_nop(struct module *mod,
         */
        if (!rec->arch.mod) {
                if (!mod) {
-                       printk(KERN_ERR "No module loaded addr=%lx\n",
-                              addr);
+                       pr_err("No module loaded addr=%lx\n", addr);
                        return -EFAULT;
                }
                rec->arch.mod = mod;
        } else if (mod) {
                if (mod != rec->arch.mod) {
-                       printk(KERN_ERR
-                              "Record mod %p not equal to passed in mod %p\n",
+                       pr_err("Record mod %p not equal to passed in mod %p\n",
                               rec->arch.mod, mod);
                        return -EINVAL;
                }
@@ -307,26 +306,25 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
         * The load offset is different depending on the ABI. For simplicity
         * just mask it out when doing the compare.
         */
-       if ((op[0] != 0x48000008) || ((op[1] & 0xffff00000) != 0xe8410000)) {
-               printk(KERN_ERR "Unexpected call sequence: %x %x\n",
-                       op[0], op[1]);
+       if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
+               pr_err("Unexpected call sequence: %x %x\n", op[0], op[1]);
                return -EINVAL;
        }
 
        /* If we never set up a trampoline to ftrace_caller, then bail */
        if (!rec->arch.mod->arch.tramp) {
-               printk(KERN_ERR "No ftrace trampoline\n");
+               pr_err("No ftrace trampoline\n");
                return -EINVAL;
        }
 
        /* Ensure branch is within 24 bits */
-       if (create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
-               printk(KERN_ERR "Branch out of range");
+       if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
+               pr_err("Branch out of range\n");
                return -EINVAL;
        }
 
        if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
-               printk(KERN_ERR "REL24 out of range!\n");
+               pr_err("REL24 out of range!\n");
                return -EINVAL;
        }
 
@@ -345,13 +343,13 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
        /* It should be pointing to a nop */
        if (op != PPC_INST_NOP) {
-               printk(KERN_ERR "Expected NOP but have %x\n", op);
+               pr_err("Expected NOP but have %x\n", op);
                return -EINVAL;
        }
 
        /* If we never set up a trampoline to ftrace_caller, then bail */
        if (!rec->arch.mod->arch.tramp) {
-               printk(KERN_ERR "No ftrace trampoline\n");
+               pr_err("No ftrace trampoline\n");
                return -EINVAL;
        }
 
@@ -359,7 +357,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
        op = create_branch((unsigned int *)ip,
                           rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
        if (!op) {
-               printk(KERN_ERR "REL24 out of range!\n");
+               pr_err("REL24 out of range!\n");
                return -EINVAL;
        }
 
@@ -397,7 +395,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
         * already have a module defined.
         */
        if (!rec->arch.mod) {
-               printk(KERN_ERR "No module loaded\n");
+               pr_err("No module loaded\n");
                return -EINVAL;
        }
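
The ftrace changes above replace bare printk(KERN_ERR ...) calls with pr_err() and define pr_fmt so every message carries a common "ftrace-powerpc: " prefix. A minimal user-space sketch of that prefixing trick, with fprintf standing in for the kernel logger (the macro names mirror the kernel ones, but nothing else here is from the patch):

#include <stdio.h>

#define pr_fmt(fmt) "ftrace-powerpc: " fmt
#define pr_err(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
        unsigned int op = 0x48000008;

        /* Expands to: fprintf(stderr, "ftrace-powerpc: Not expected bl: opcode is %x\n", op); */
        pr_err("Not expected bl: opcode is %x\n", op);
        return 0;
}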
 
index b82227e7e21bbba3cb22456c0c9248bb65bbd462..12e48d56f771e85399897ac7749a8c99d1d6440d 100644 (file)
@@ -23,7 +23,7 @@ unsigned int ioread16(void __iomem *addr)
 }
 unsigned int ioread16be(void __iomem *addr)
 {
-       return in_be16(addr);
+       return readw_be(addr);
 }
 unsigned int ioread32(void __iomem *addr)
 {
@@ -31,7 +31,7 @@ unsigned int ioread32(void __iomem *addr)
 }
 unsigned int ioread32be(void __iomem *addr)
 {
-       return in_be32(addr);
+       return readl_be(addr);
 }
 EXPORT_SYMBOL(ioread8);
 EXPORT_SYMBOL(ioread16);
@@ -49,7 +49,7 @@ void iowrite16(u16 val, void __iomem *addr)
 }
 void iowrite16be(u16 val, void __iomem *addr)
 {
-       out_be16(addr, val);
+       writew_be(val, addr);
 }
 void iowrite32(u32 val, void __iomem *addr)
 {
@@ -57,7 +57,7 @@ void iowrite32(u32 val, void __iomem *addr)
 }
 void iowrite32be(u32 val, void __iomem *addr)
 {
-       out_be32(addr, val);
+       writel_be(val, addr);
 }
 EXPORT_SYMBOL(iowrite8);
 EXPORT_SYMBOL(iowrite16);
@@ -75,15 +75,15 @@ EXPORT_SYMBOL(iowrite32be);
  */
 void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
 {
-       _insb((u8 __iomem *) addr, dst, count);
+       readsb(addr, dst, count);
 }
 void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
 {
-       _insw_ns((u16 __iomem *) addr, dst, count);
+       readsw(addr, dst, count);
 }
 void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
 {
-       _insl_ns((u32 __iomem *) addr, dst, count);
+       readsl(addr, dst, count);
 }
 EXPORT_SYMBOL(ioread8_rep);
 EXPORT_SYMBOL(ioread16_rep);
@@ -91,15 +91,15 @@ EXPORT_SYMBOL(ioread32_rep);
 
 void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
 {
-       _outsb((u8 __iomem *) addr, src, count);
+       writesb(addr, src, count);
 }
 void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
 {
-       _outsw_ns((u16 __iomem *) addr, src, count);
+       writesw(addr, src, count);
 }
 void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
 {
-       _outsl_ns((u32 __iomem *) addr, src, count);
+       writesl(addr, src, count);
 }
 EXPORT_SYMBOL(iowrite8_rep);
 EXPORT_SYMBOL(iowrite16_rep);
index 90fab64d911dcdca1cbcf4992e1bda00160db6a4..2f72af82513c71d2d347c0373eaa0714e482ae5d 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/kdebug.h>
 #include <linux/slab.h>
+#include <asm/code-patching.h>
 #include <asm/cacheflush.h>
 #include <asm/sstep.h>
 #include <asm/uaccess.h>
@@ -491,12 +492,10 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
        return ret;
 }
 
-#ifdef CONFIG_PPC64
 unsigned long arch_deref_entry_point(void *entry)
 {
-       return ((func_descr_t *)entry)->entry;
+       return ppc_global_function_entry(entry);
 }
-#endif
 
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
@@ -508,7 +507,11 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
        /* setup return addr to the jprobe handler routine */
        regs->nip = arch_deref_entry_point(jp->entry);
 #ifdef CONFIG_PPC64
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+       regs->gpr[12] = (unsigned long)jp->entry;
+#else
        regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
+#endif
 #endif
 
        return 1;
index 077d2ce6c5a7c64b2a51404e7e33fbb5004541b9..d807ee626af9c32a1a258c673c508e6bb9519081 100644 (file)
@@ -315,8 +315,17 @@ static void dedotify_versions(struct modversion_info *vers,
        struct modversion_info *end;
 
        for (end = (void *)vers + size; vers < end; vers++)
-               if (vers->name[0] == '.')
+               if (vers->name[0] == '.') {
                        memmove(vers->name, vers->name+1, strlen(vers->name));
+#ifdef ARCH_RELOCATES_KCRCTAB
+                       /* The TOC symbol has no CRC computed. To avoid CRC
+                        * check failing, we must force it to the expected
+                        * value (see CRC check in module.c).
+                        */
+                       if (!strcmp(vers->name, "TOC."))
+                               vers->crc = -(unsigned long)reloc_start;
+#endif
+               }
 }
 
 /* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */
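
The dedotify_versions() hunk strips the leading '.' from a version record name in place and, for relocatable kernels, forces the CRC of the "TOC." entry to the expected value. The in-place strip relies on memmove() copying the terminating NUL along with the rest of the string; below is a small stand-alone sketch of just that string manipulation (dedotify here is a local toy, not the kernel function):

#include <stdio.h>
#include <string.h>

static void dedotify(char *name)
{
        if (name[0] == '.')
                /* strlen(name) bytes starting at name + 1 include the NUL */
                memmove(name, name + 1, strlen(name));
}

int main(void)
{
        char sym[] = ".TOC.";

        dedotify(sym);
        printf("%s\n", sym);    /* prints "TOC." */
        return 0;
}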
index 613a860a203c9665287910d4218498de40432973..b694b0730971e1eba7a5ba4cebe7b91d3acf9279 100644 (file)
@@ -662,13 +662,6 @@ void __init early_init_devtree(void *params)
        of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
 #endif
 
-       /* Pre-initialize the cmd_line with the content of boot_commmand_line,
-        * which will be empty except when the content of the variable has
-        * been overriden by a bootloading mechanism. This happens typically
-        * with HAL takeover
-        */
-       strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
-
        /* Retrieve various informations from the /chosen node of the
         * device-tree, including the platform type, initrd location and
         * size, TCE reserve, and more ...
index 078145acf7fb867dca6278f776a935ba0b2ecf25..1a85d8f96739550a6a88bc6ad94484a1431955cd 100644 (file)
@@ -1268,201 +1268,6 @@ static u64 __initdata prom_opal_base;
 static u64 __initdata prom_opal_entry;
 #endif
 
-#ifdef __BIG_ENDIAN__
-/* XXX Don't change this structure without updating opal-takeover.S */
-static struct opal_secondary_data {
-       s64                             ack;    /*  0 */
-       u64                             go;     /*  8 */
-       struct opal_takeover_args       args;   /* 16 */
-} opal_secondary_data;
-
-static u64 __initdata prom_opal_align;
-static u64 __initdata prom_opal_size;
-static int __initdata prom_rtas_start_cpu;
-static u64 __initdata prom_rtas_data;
-static u64 __initdata prom_rtas_entry;
-
-extern char opal_secondary_entry;
-
-static void __init prom_query_opal(void)
-{
-       long rc;
-
-       /* We must not query for OPAL presence on a machine that
-        * supports TNK takeover (970 blades), as this uses the same
-        * h-call with different arguments and will crash
-        */
-       if (PHANDLE_VALID(call_prom("finddevice", 1, 1,
-                                   ADDR("/tnk-memory-map")))) {
-               prom_printf("TNK takeover detected, skipping OPAL check\n");
-               return;
-       }
-
-       prom_printf("Querying for OPAL presence... ");
-
-       rc = opal_query_takeover(&prom_opal_size,
-                                &prom_opal_align);
-       prom_debug("(rc = %ld) ", rc);
-       if (rc != 0) {
-               prom_printf("not there.\n");
-               return;
-       }
-       of_platform = PLATFORM_OPAL;
-       prom_printf(" there !\n");
-       prom_debug("  opal_size  = 0x%lx\n", prom_opal_size);
-       prom_debug("  opal_align = 0x%lx\n", prom_opal_align);
-       if (prom_opal_align < 0x10000)
-               prom_opal_align = 0x10000;
-}
-
-static int __init prom_rtas_call(int token, int nargs, int nret,
-                                int *outputs, ...)
-{
-       struct rtas_args rtas_args;
-       va_list list;
-       int i;
-
-       rtas_args.token = token;
-       rtas_args.nargs = nargs;
-       rtas_args.nret  = nret;
-       rtas_args.rets  = (rtas_arg_t *)&(rtas_args.args[nargs]);
-       va_start(list, outputs);
-       for (i = 0; i < nargs; ++i)
-               rtas_args.args[i] = va_arg(list, rtas_arg_t);
-       va_end(list);
-
-       for (i = 0; i < nret; ++i)
-               rtas_args.rets[i] = 0;
-
-       opal_enter_rtas(&rtas_args, prom_rtas_data,
-                       prom_rtas_entry);
-
-       if (nret > 1 && outputs != NULL)
-               for (i = 0; i < nret-1; ++i)
-                       outputs[i] = rtas_args.rets[i+1];
-       return (nret > 0)? rtas_args.rets[0]: 0;
-}
-
-static void __init prom_opal_hold_cpus(void)
-{
-       int i, cnt, cpu, rc;
-       long j;
-       phandle node;
-       char type[64];
-       u32 servers[8];
-       void *entry = (unsigned long *)&opal_secondary_entry;
-       struct opal_secondary_data *data = &opal_secondary_data;
-
-       prom_debug("prom_opal_hold_cpus: start...\n");
-       prom_debug("    - entry       = 0x%x\n", entry);
-       prom_debug("    - data        = 0x%x\n", data);
-
-       data->ack = -1;
-       data->go = 0;
-
-       /* look for cpus */
-       for (node = 0; prom_next_node(&node); ) {
-               type[0] = 0;
-               prom_getprop(node, "device_type", type, sizeof(type));
-               if (strcmp(type, "cpu") != 0)
-                       continue;
-
-               /* Skip non-configured cpus. */
-               if (prom_getprop(node, "status", type, sizeof(type)) > 0)
-                       if (strcmp(type, "okay") != 0)
-                               continue;
-
-               cnt = prom_getprop(node, "ibm,ppc-interrupt-server#s", servers,
-                            sizeof(servers));
-               if (cnt == PROM_ERROR)
-                       break;
-               cnt >>= 2;
-               for (i = 0; i < cnt; i++) {
-                       cpu = servers[i];
-                       prom_debug("CPU %d ... ", cpu);
-                       if (cpu == prom.cpu) {
-                               prom_debug("booted !\n");
-                               continue;
-                       }
-                       prom_debug("starting ... ");
-
-                       /* Init the acknowledge var which will be reset by
-                        * the secondary cpu when it awakens from its OF
-                        * spinloop.
-                        */
-                       data->ack = -1;
-                       rc = prom_rtas_call(prom_rtas_start_cpu, 3, 1,
-                                           NULL, cpu, entry, data);
-                       prom_debug("rtas rc=%d ...", rc);
-
-                       for (j = 0; j < 100000000 && data->ack == -1; j++) {
-                               HMT_low();
-                               mb();
-                       }
-                       HMT_medium();
-                       if (data->ack != -1)
-                               prom_debug("done, PIR=0x%x\n", data->ack);
-                       else
-                               prom_debug("timeout !\n");
-               }
-       }
-       prom_debug("prom_opal_hold_cpus: end...\n");
-}
-
-static void __init prom_opal_takeover(void)
-{
-       struct opal_secondary_data *data = &opal_secondary_data;
-       struct opal_takeover_args *args = &data->args;
-       u64 align = prom_opal_align;
-       u64 top_addr, opal_addr;
-
-       args->k_image   = (u64)_stext;
-       args->k_size    = _end - _stext;
-       args->k_entry   = 0;
-       args->k_entry2  = 0x60;
-
-       top_addr = _ALIGN_UP(args->k_size, align);
-
-       if (prom_initrd_start != 0) {
-               args->rd_image = prom_initrd_start;
-               args->rd_size = prom_initrd_end - args->rd_image;
-               args->rd_loc = top_addr;
-               top_addr = _ALIGN_UP(args->rd_loc + args->rd_size, align);
-       }
-
-       /* Pickup an address for the HAL. We want to go really high
-        * up to avoid problem with future kexecs. On the other hand
-        * we don't want to be all over the TCEs on P5IOC2 machines
-        * which are going to be up there too. We assume the machine
-        * has plenty of memory, and we ask for the HAL for now to
-        * be just below the 1G point, or above the initrd
-        */
-       opal_addr = _ALIGN_DOWN(0x40000000 - prom_opal_size, align);
-       if (opal_addr < top_addr)
-               opal_addr = top_addr;
-       args->hal_addr = opal_addr;
-
-       /* Copy the command line to the kernel image */
-       strlcpy(boot_command_line, prom_cmd_line,
-               COMMAND_LINE_SIZE);
-
-       prom_debug("  k_image    = 0x%lx\n", args->k_image);
-       prom_debug("  k_size     = 0x%lx\n", args->k_size);
-       prom_debug("  k_entry    = 0x%lx\n", args->k_entry);
-       prom_debug("  k_entry2   = 0x%lx\n", args->k_entry2);
-       prom_debug("  hal_addr   = 0x%lx\n", args->hal_addr);
-       prom_debug("  rd_image   = 0x%lx\n", args->rd_image);
-       prom_debug("  rd_size    = 0x%lx\n", args->rd_size);
-       prom_debug("  rd_loc     = 0x%lx\n", args->rd_loc);
-       prom_printf("Performing OPAL takeover,this can take a few minutes..\n");
-       prom_close_stdin();
-       mb();
-       data->go = 1;
-       for (;;)
-               opal_do_takeover(args);
-}
-#endif /* __BIG_ENDIAN__ */
-
 /*
  * Allocate room for and instantiate OPAL
  */
@@ -1597,12 +1402,6 @@ static void __init prom_instantiate_rtas(void)
                         &val, sizeof(val)) != PROM_ERROR)
                rtas_has_query_cpu_stopped = true;
 
-#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
-       /* PowerVN takeover hack */
-       prom_rtas_data = base;
-       prom_rtas_entry = entry;
-       prom_getprop(rtas_node, "start-cpu", &prom_rtas_start_cpu, 4);
-#endif
        prom_debug("rtas base     = 0x%x\n", base);
        prom_debug("rtas entry    = 0x%x\n", entry);
        prom_debug("rtas size     = 0x%x\n", (long)size);
@@ -3027,16 +2826,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
                prom_instantiate_rtas();
 
 #ifdef CONFIG_PPC_POWERNV
-#ifdef __BIG_ENDIAN__
-       /* Detect HAL and try instanciating it & doing takeover */
-       if (of_platform == PLATFORM_PSERIES_LPAR) {
-               prom_query_opal();
-               if (of_platform == PLATFORM_OPAL) {
-                       prom_opal_hold_cpus();
-                       prom_opal_takeover();
-               }
-       } else
-#endif /* __BIG_ENDIAN__ */
        if (of_platform == PLATFORM_OPAL)
                prom_instantiate_opal();
 #endif /* CONFIG_PPC_POWERNV */
index 77aa1e95e9046d39908048d8a711c44c83841415..fe8e54b9ef7db5bddd01b3917741cadbc0746fa3 100644 (file)
@@ -21,9 +21,7 @@ _end enter_prom memcpy memset reloc_offset __secondary_hold
 __secondary_hold_acknowledge __secondary_hold_spinloop __start
 strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
 reloc_got2 kernstart_addr memstart_addr linux_banner _stext
-opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry
-boot_command_line __prom_init_toc_start __prom_init_toc_end
-btext_setup_display TOC."
+__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
 
 NM="$1"
 OBJ="$2"
index e239df3768acfb3e14c55367c7ab737233f50771..e5b022c55ccd3a62ec11454e315717a89d544209 100644 (file)
@@ -469,9 +469,17 @@ void __init smp_setup_cpu_maps(void)
                }
 
                for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
+                       bool avail;
+
                        DBG("    thread %d -> cpu %d (hard id %d)\n",
                            j, cpu, be32_to_cpu(intserv[j]));
-                       set_cpu_present(cpu, of_device_is_available(dn));
+
+                       avail = of_device_is_available(dn);
+                       if (!avail)
+                               avail = !of_property_match_string(dn,
+                                               "enable-method", "spin-table");
+
+                       set_cpu_present(cpu, avail);
                        set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
                        set_cpu_possible(cpu, true);
                        cpu++;
index 4e47db686b5de5a2ab3e32c72f5cec6cfed9d3d9..1bc5a1755ed4648cfc5658e650e59dda772264b6 100644 (file)
@@ -54,7 +54,6 @@
 
 #include "signal.h"
 
-#undef DEBUG_SIG
 
 #ifdef CONFIG_PPC64
 #define sys_rt_sigreturn       compat_sys_rt_sigreturn
@@ -1063,10 +1062,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
        return 1;
 
 badframe:
-#ifdef DEBUG_SIG
-       printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
-              regs, frame, newsp);
-#endif
        if (show_unhandled_signals)
                printk_ratelimited(KERN_INFO
                                   "%s[%d]: bad frame in handle_rt_signal32: "
@@ -1484,10 +1479,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
        return 1;
 
 badframe:
-#ifdef DEBUG_SIG
-       printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
-              regs, frame, newsp);
-#endif
        if (show_unhandled_signals)
                printk_ratelimited(KERN_INFO
                                   "%s[%d]: bad frame in handle_signal32: "
index d501dc4dc3e6634f2b7ce4a4e8cff271bf0f661e..97c1e4b683fcb4cfdbe871b51ae8b5c23bb72a49 100644 (file)
@@ -38,7 +38,6 @@
 
 #include "signal.h"
 
-#define DEBUG_SIG 0
 
 #define GP_REGS_SIZE   min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
 #define FP_REGS_SIZE   sizeof(elf_fpregset_t)
@@ -700,10 +699,6 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
        return 0;
 
 badframe:
-#if DEBUG_SIG
-       printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
-              regs, uc, &uc->uc_mcontext);
-#endif
        if (show_unhandled_signals)
                printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
                                   current->comm, current->pid, "rt_sigreturn",
@@ -809,10 +804,6 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
        return 1;
 
 badframe:
-#if DEBUG_SIG
-       printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
-              regs, frame, newsp);
-#endif
        if (show_unhandled_signals)
                printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
                                   current->comm, current->pid, "setup_rt_frame",
index 94560db788bfe30327013dadcd94ec3a1eb6aebb..2c15ff09448351f57dcbcace80688c3e5925d38d 100644 (file)
@@ -125,7 +125,7 @@ static ssize_t show_throttle(struct cbe_pmd_regs __iomem *pmd_regs, char *buf, i
 static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos)
 {
        u64 reg_value;
-       int temp;
+       unsigned int temp;
        u64 new_value;
        int ret;
 
index d55891f89a2ce2bf0c14b22b47564760dd16abf0..4ad227d04c1ab18a8d1b0eba4fbaab7d1726c795 100644 (file)
@@ -1,4 +1,4 @@
-obj-y                  += setup.o opal-takeover.o opal-wrappers.o opal.o opal-async.o
+obj-y                  += setup.o opal-wrappers.o opal.o opal-async.o
 obj-y                  += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
 obj-y                  += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
 obj-y                  += opal-msglog.o
diff --git a/arch/powerpc/platforms/powernv/opal-takeover.S b/arch/powerpc/platforms/powernv/opal-takeover.S
deleted file mode 100644 (file)
index 11a3169..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * PowerNV OPAL takeover assembly code, for use by prom_init.c
- *
- * Copyright 2011 IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/ppc_asm.h>
-#include <asm/hvcall.h>
-#include <asm/asm-offsets.h>
-#include <asm/opal.h>
-
-#define H_HAL_TAKEOVER                 0x5124
-#define H_HAL_TAKEOVER_QUERY_MAGIC     -1
-
-       .text
-_GLOBAL(opal_query_takeover)
-       mfcr    r0
-       stw     r0,8(r1)
-       stdu    r1,-STACKFRAMESIZE(r1)
-       std     r3,STK_PARAM(R3)(r1)
-       std     r4,STK_PARAM(R4)(r1)
-       li      r3,H_HAL_TAKEOVER
-       li      r4,H_HAL_TAKEOVER_QUERY_MAGIC
-       HVSC
-       addi    r1,r1,STACKFRAMESIZE
-       ld      r10,STK_PARAM(R3)(r1)
-       std     r4,0(r10)
-       ld      r10,STK_PARAM(R4)(r1)
-       std     r5,0(r10)
-       lwz     r0,8(r1)
-       mtcrf   0xff,r0
-       blr
-
-_GLOBAL(opal_do_takeover)
-       mfcr    r0
-       stw     r0,8(r1)
-       mflr    r0
-       std     r0,16(r1)
-       bl      __opal_do_takeover
-       ld      r0,16(r1)
-       mtlr    r0
-       lwz     r0,8(r1)
-       mtcrf   0xff,r0
-       blr
-
-__opal_do_takeover:
-       ld      r4,0(r3)
-       ld      r5,0x8(r3)
-       ld      r6,0x10(r3)
-       ld      r7,0x18(r3)
-       ld      r8,0x20(r3)
-       ld      r9,0x28(r3)
-       ld      r10,0x30(r3)
-       ld      r11,0x38(r3)
-       li      r3,H_HAL_TAKEOVER
-       HVSC
-       blr
-
-       .globl opal_secondary_entry
-opal_secondary_entry:
-       mr      r31,r3
-       mfmsr   r11
-       li      r12,(MSR_SF | MSR_ISF)@highest
-       sldi    r12,r12,48
-       or      r11,r11,r12
-       mtmsrd  r11
-       isync
-       mfspr   r4,SPRN_PIR
-       std     r4,0(r3)
-1:     HMT_LOW
-       ld      r4,8(r3)
-       cmpli   cr0,r4,0
-       beq     1b
-       HMT_MEDIUM
-1:     addi    r3,r31,16
-       bl      __opal_do_takeover
-       b       1b
-
-_GLOBAL(opal_enter_rtas)
-       mflr    r0
-       std     r0,16(r1)
-        stdu   r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
-
-       /* Because PROM is running in 32b mode, it clobbers the high order half
-        * of all registers that it saves.  We therefore save those registers
-        * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
-       */
-       SAVE_GPR(2, r1)
-       SAVE_GPR(13, r1)
-       SAVE_8GPRS(14, r1)
-       SAVE_10GPRS(22, r1)
-       mfcr    r10
-       mfmsr   r11
-       std     r10,_CCR(r1)
-       std     r11,_MSR(r1)
-
-       /* Get the PROM entrypoint */
-       mtlr    r5
-
-       /* Switch MSR to 32 bits mode
-        */
-        li      r12,1
-        rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
-        andc    r11,r11,r12
-        li      r12,1
-        rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
-        andc    r11,r11,r12
-        mtmsrd  r11
-        isync
-
-       /* Enter RTAS here... */
-       blrl
-
-       /* Just make sure that r1 top 32 bits didn't get
-        * corrupt by OF
-        */
-       rldicl  r1,r1,0,32
-
-       /* Restore the MSR (back to 64 bits) */
-       ld      r0,_MSR(r1)
-       MTMSRD(r0)
-        isync
-
-       /* Restore other registers */
-       REST_GPR(2, r1)
-       REST_GPR(13, r1)
-       REST_8GPRS(14, r1)
-       REST_10GPRS(22, r1)
-       ld      r4,_CCR(r1)
-       mtcr    r4
-
-        addi   r1,r1,PROM_FRAME_SIZE
-       ld      r0,16(r1)
-       mtlr    r0
-       blr
index 62c47bb765178a10fe3aa371ef0c7d5d0c0c6364..9e5353ff6d1bb936fb1b84af74d8f2e6a1dd6da2 100644 (file)
@@ -476,6 +476,11 @@ void __init alloc_dart_table(void)
         */
        dart_tablebase = (unsigned long)
                __va(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
+       /*
+        * The DART space is later unmapped from the kernel linear mapping and
+        * accessing dart_tablebase during kmemleak scanning will fault.
+        */
+       kmemleak_no_scan((void *)dart_tablebase);
 
        printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
 }
index 375cffcf7dbd1a6de6fe67f528cc264ed7cd8d8a..91d2193813069eb8d7e4572dc48269a8d9d50812 100644 (file)
@@ -89,7 +89,7 @@ static inline unsigned long get_softint(void)
        return retval;
 }
 
-void arch_trigger_all_cpu_backtrace(void);
+void arch_trigger_all_cpu_backtrace(bool);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 extern void *hardirq_stack[NR_CPUS];
index b2988f25e2300e2a03fa2725461ee3a489deccae..027e099861947655bc59583b0f2a715ac1fb10f2 100644 (file)
@@ -239,7 +239,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
        }
 }
 
-void arch_trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(bool include_self)
 {
        struct thread_info *tp = current_thread_info();
        struct pt_regs *regs = get_irq_regs();
@@ -251,16 +251,22 @@ void arch_trigger_all_cpu_backtrace(void)
 
        spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
 
-       memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
-
        this_cpu = raw_smp_processor_id();
 
-       __global_reg_self(tp, regs, this_cpu);
+       memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+
+       if (include_self)
+               __global_reg_self(tp, regs, this_cpu);
 
        smp_fetch_global_regs();
 
        for_each_online_cpu(cpu) {
-               struct global_reg_snapshot *gp = &global_cpu_snapshot[cpu].reg;
+               struct global_reg_snapshot *gp;
+
+               if (!include_self && cpu == this_cpu)
+                       continue;
+
+               gp = &global_cpu_snapshot[cpu].reg;
 
                __global_reg_poll(gp);
 
@@ -292,7 +298,7 @@ void arch_trigger_all_cpu_backtrace(void)
 
 static void sysrq_handle_globreg(int key)
 {
-       arch_trigger_all_cpu_backtrace();
+       arch_trigger_all_cpu_backtrace(true);
 }
 
 static struct sysrq_key_op sparc_globalreg_op = {
index cb6cfcd034cf719d626232430ab6af0e46a1874c..a80cbb88ea911e0ab855e804aaca7eac41ad0a85 100644 (file)
@@ -43,7 +43,7 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
 extern void init_ISA_irqs(void);
 
 #ifdef CONFIG_X86_LOCAL_APIC
-void arch_trigger_all_cpu_backtrace(void);
+void arch_trigger_all_cpu_backtrace(bool);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 #endif
 
index c3fcb5de508391ca20684669b3746a2c734cad02..6a1e71bde323360d976e58b49ca231443e3e0584 100644 (file)
@@ -33,31 +33,41 @@ static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
 /* "in progress" flag of arch_trigger_all_cpu_backtrace */
 static unsigned long backtrace_flag;
 
-void arch_trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(bool include_self)
 {
        int i;
+       int cpu = get_cpu();
 
-       if (test_and_set_bit(0, &backtrace_flag))
+       if (test_and_set_bit(0, &backtrace_flag)) {
                /*
                 * If there is already a trigger_all_cpu_backtrace() in progress
                 * (backtrace_flag == 1), don't output double cpu dump infos.
                 */
+               put_cpu();
                return;
+       }
 
        cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
+       if (!include_self)
+               cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 
-       printk(KERN_INFO "sending NMI to all CPUs:\n");
-       apic->send_IPI_all(NMI_VECTOR);
+       if (!cpumask_empty(to_cpumask(backtrace_mask))) {
+               pr_info("sending NMI to %s CPUs:\n",
+                       (include_self ? "all" : "other"));
+               apic->send_IPI_mask(to_cpumask(backtrace_mask), NMI_VECTOR);
+       }
 
        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
                if (cpumask_empty(to_cpumask(backtrace_mask)))
                        break;
                mdelay(1);
+               touch_softlockup_watchdog();
        }
 
        clear_bit(0, &backtrace_flag);
        smp_mb__after_atomic();
+       put_cpu();
 }
 
 static int
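
The x86 hunk above adds an include_self flag so arch_trigger_all_cpu_backtrace() can leave the calling CPU out of the NMI mask. A toy user-space sketch of that masking step, with a plain unsigned long standing in for the kernel cpumask (function and variable names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static void trigger_backtrace(unsigned long online_mask, int this_cpu, bool include_self)
{
        unsigned long mask = online_mask;

        if (!include_self)
                mask &= ~(1UL << this_cpu);     /* drop the current CPU */

        if (mask)
                printf("sending NMI to %s CPUs: mask=%#lx\n",
                       include_self ? "all" : "other", mask);
}

int main(void)
{
        trigger_backtrace(0xf, 2, true);        /* CPUs 0-3 */
        trigger_backtrace(0xf, 2, false);       /* CPUs 0, 1 and 3 only */
        return 0;
}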
index 83969f8c5727e79dcb53da459e15d7d1d8dc3834..6467c919c50993ebfc32e14b2ac2860c0b3beae8 100644 (file)
@@ -176,14 +176,24 @@ static int __init cma_activate_area(struct cma *cma)
                base_pfn = pfn;
                for (j = pageblock_nr_pages; j; --j, pfn++) {
                        WARN_ON_ONCE(!pfn_valid(pfn));
+                       /*
+                        * alloc_contig_range requires the pfn range
+                        * specified to be in the same zone. Make this
+                        * simple by forcing the entire CMA resv range
+                        * to be in the same zone.
+                        */
                        if (page_zone(pfn_to_page(pfn)) != zone)
-                               return -EINVAL;
+                               goto err;
                }
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);
 
        mutex_init(&cma->lock);
        return 0;
+
+err:
+       kfree(cma->bitmap);
+       return -EINVAL;
 }
 
 static struct cma cma_areas[MAX_CMA_AREAS];
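
The cma_activate_area() fix above stops leaking cma->bitmap when a page block turns out to span zones: instead of returning -EINVAL directly, it jumps to an err label that frees the bitmap first. A generic sketch of that goto-based cleanup idiom in plain C (the struct and the failure condition are invented for illustration):

#include <stdlib.h>

struct area {
        unsigned long *bitmap;
};

static int activate(struct area *a, int nblocks)
{
        int i;

        a->bitmap = calloc(nblocks, sizeof(*a->bitmap));
        if (!a->bitmap)
                return -1;

        for (i = 0; i < nblocks; i++) {
                if (i == 3)             /* simulated per-block validation failure */
                        goto err;
        }
        return 0;

err:
        free(a->bitmap);                /* undo the earlier allocation before bailing out */
        a->bitmap = NULL;
        return -1;
}

int main(void)
{
        struct area a;

        return activate(&a, 8) ? 1 : 0; /* exercises the error path */
}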
index bbeb404b3a07068ae2d3c94a44d10fbe6101d080..b2c98c1bc037e8c7674722cba6b56f5e4a7383aa 100644 (file)
@@ -1431,6 +1431,14 @@ static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
        return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
 }
 
+static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
+{
+       struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
+
+       return obj_request->img_offset <
+           round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
+}
+
 static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
 {
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
@@ -2748,7 +2756,7 @@ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
         */
        if (!img_request_write_test(img_request) ||
                !img_request_layered_test(img_request) ||
-               rbd_dev->parent_overlap <= obj_request->img_offset ||
+               !obj_request_overlaps_parent(obj_request) ||
                ((known = obj_request_known_test(obj_request)) &&
                        obj_request_exists_test(obj_request))) {
 
index 8d6420013a04cabc8a9920bcf09780dde98b4d66..f71d55f5e6e5d7d4b95716555f083a5d08f2a354 100644 (file)
@@ -153,13 +153,10 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
 }
 
 /* Clocksource handling */
-static void exynos4_mct_frc_start(u32 hi, u32 lo)
+static void exynos4_mct_frc_start(void)
 {
        u32 reg;
 
-       exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
-       exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);
-
        reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
        reg |= MCT_G_TCON_START;
        exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
@@ -181,7 +178,7 @@ static cycle_t exynos4_frc_read(struct clocksource *cs)
 
 static void exynos4_frc_resume(struct clocksource *cs)
 {
-       exynos4_mct_frc_start(0, 0);
+       exynos4_mct_frc_start();
 }
 
 struct clocksource mct_frc = {
@@ -200,7 +197,7 @@ static u64 notrace exynos4_read_sched_clock(void)
 
 static void __init exynos4_clocksource_init(void)
 {
-       exynos4_mct_frc_start(0, 0);
+       exynos4_mct_frc_start();
 
        if (clocksource_register_hz(&mct_frc, clk_rate))
                panic("%s: can't register clocksource\n", mct_frc.name);
index 23b4a3b28dbcec0152f1f348c4e2f9ff634c0902..4eab93aa570bc23a3fedfee3de346614f4974171 100644 (file)
@@ -1257,7 +1257,8 @@ static unsigned int smu_fpoll(struct file *file, poll_table *wait)
                if (pp->busy && pp->cmd.status != 1)
                        mask |= POLLIN;
                spin_unlock_irqrestore(&pp->lock, flags);
-       } if (pp->mode == smu_file_events) {
+       }
+       if (pp->mode == smu_file_events) {
                /* Not yet implemented */
        }
        return mask;
index 2a635b6fdaf7274ac64b901376cbb1a496d21a4a..c880ba6857541a172bf3529329db54a03730dc3c 100644 (file)
@@ -601,6 +601,7 @@ static int rtsx_pci_ms_drv_remove(struct platform_device *pdev)
        pcr->slots[RTSX_MS_CARD].card_event = NULL;
        msh = host->msh;
        host->eject = true;
+       cancel_work_sync(&host->handle_req);
 
        mutex_lock(&host->host_mutex);
        if (host->req) {
index a43d0c467274399b641cd469e38f4f2dcc269dbe..ee9402324a23a13cff9b0e22b74171eaab4e9017 100644 (file)
@@ -54,7 +54,7 @@ config AD525X_DPOT_SPI
 config ATMEL_PWM
        tristate "Atmel AT32/AT91 PWM support"
        depends on HAVE_CLK
-       depends on AVR32 || AT91SAM9263 || AT91SAM9RL || AT91SAM9G45
+       depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
        help
          This option enables device driver support for the PWM channels
          on certain Atmel processors.  Pulse Width Modulation is used for
index c41aca4dfc43f3aad8d8a97f5613424d683b02e2..72000a6d5af0faded0895a42b26889f555d99d46 100644 (file)
@@ -991,7 +991,7 @@ static const struct of_device_id msm_uartdm_table[] = {
        { }
 };
 
-static int __init msm_serial_probe(struct platform_device *pdev)
+static int msm_serial_probe(struct platform_device *pdev)
 {
        struct msm_port *msm_port;
        struct resource *resource;
index 971a760af4a123f6c7c8982d0dcf2fadf09618b7..8dae2f724a35ebfe811864a8c5c428280cfd2a76 100644 (file)
@@ -700,14 +700,6 @@ static void handle_rx_net(struct vhost_work *work)
        handle_rx(net);
 }
 
-static void vhost_net_free(void *addr)
-{
-       if (is_vmalloc_addr(addr))
-               vfree(addr);
-       else
-               kfree(addr);
-}
-
 static int vhost_net_open(struct inode *inode, struct file *f)
 {
        struct vhost_net *n;
@@ -723,7 +715,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
        }
        vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
-               vhost_net_free(n);
+               kvfree(n);
                return -ENOMEM;
        }
 
@@ -840,7 +832,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
         * since jobs can re-queue themselves. */
        vhost_net_flush(n);
        kfree(n->dev.vqs);
-       vhost_net_free(n);
+       kvfree(n);
        return 0;
 }
 
index 4f4ffa4c604e081755a3b77dba0fccb24c6ded7d..69906cacd04fdc8b3d5236dc030e8bcf749c541f 100644 (file)
@@ -1503,14 +1503,6 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
        return 0;
 }
 
-static void vhost_scsi_free(struct vhost_scsi *vs)
-{
-       if (is_vmalloc_addr(vs))
-               vfree(vs);
-       else
-               kfree(vs);
-}
-
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
        struct vhost_scsi *vs;
@@ -1550,7 +1542,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
        return 0;
 
 err_vqs:
-       vhost_scsi_free(vs);
+       kvfree(vs);
 err_vs:
        return r;
 }
@@ -1569,7 +1561,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
        /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
        vhost_scsi_flush(vs);
        kfree(vs->dev.vqs);
-       vhost_scsi_free(vs);
+       kvfree(vs);
        return 0;
 }
 
index 4f078c054b41608fe3a8ed325c55d59898b049cb..955947ef3e0263590b64162f5888b81822196415 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1021,6 +1021,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 
        /* everything turned out well, dispose of the aiocb. */
        kiocb_free(iocb);
+       put_reqs_available(ctx, 1);
 
        /*
         * We have to order our ring_info tail store above and test
@@ -1062,6 +1063,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
        if (head == tail)
                goto out;
 
+       head %= ctx->nr_events;
+       tail %= ctx->nr_events;
+
        while (ret < nr) {
                long avail;
                struct io_event *ev;
@@ -1100,8 +1104,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
        flush_dcache_page(ctx->ring_pages[0]);
 
        pr_debug("%li  h%u t%u\n", ret, head, tail);
-
-       put_reqs_available(ctx, ret);
 out:
        mutex_unlock(&ctx->ring_lock);
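
The aio hunk wraps head and tail modulo ctx->nr_events before walking the completion ring (and moves the put_reqs_available() accounting to completion time). A toy consumer showing why the wrap matters: published indices are reduced into the ring before being used as array offsets (ring size and contents are invented):

#include <stdio.h>

#define RING_SIZE 8

static int ring[RING_SIZE];

static void read_events(unsigned int head, unsigned int tail)
{
        /* reduce published indices into the ring before indexing */
        head %= RING_SIZE;
        tail %= RING_SIZE;

        while (head != tail) {
                printf("event %d\n", ring[head]);
                head = (head + 1) % RING_SIZE;
        }
}

int main(void)
{
        unsigned int i;

        for (i = 0; i < RING_SIZE; i++)
                ring[i] = i;

        read_events(10, 13);    /* consumes slots 2, 3 and 4 */
        return 0;
}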
 
index a106b3f2b22a1f2cd960b186ff0b24e47b93de6a..fae17c640df3eebb9fe572485709cb395f73a4bf 100644 (file)
@@ -331,6 +331,7 @@ struct dlm_lock_resource
        u16 state;
        char lvb[DLM_LVB_LEN];
        unsigned int inflight_locks;
+       unsigned int inflight_assert_workers;
        unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
 };
 
@@ -910,6 +911,9 @@ void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
 void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
                                   struct dlm_lock_resource *res);
 
+void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res);
+
 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
index 3087a21d32f9d38dc00b3b9caa9baf548d311589..82abf0cc9a12e2fbc8531f2542830d07ed100293 100644 (file)
@@ -581,6 +581,7 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
        atomic_set(&res->asts_reserved, 0);
        res->migration_pending = 0;
        res->inflight_locks = 0;
+       res->inflight_assert_workers = 0;
 
        res->dlm = dlm;
 
@@ -683,6 +684,43 @@ void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
        wake_up(&res->wq);
 }
 
+void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res)
+{
+       assert_spin_locked(&res->spinlock);
+       res->inflight_assert_workers++;
+       mlog(0, "%s:%.*s: inflight assert worker++: now %u\n",
+                       dlm->name, res->lockname.len, res->lockname.name,
+                       res->inflight_assert_workers);
+}
+
+static void dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res)
+{
+       spin_lock(&res->spinlock);
+       __dlm_lockres_grab_inflight_worker(dlm, res);
+       spin_unlock(&res->spinlock);
+}
+
+static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res)
+{
+       assert_spin_locked(&res->spinlock);
+       BUG_ON(res->inflight_assert_workers == 0);
+       res->inflight_assert_workers--;
+       mlog(0, "%s:%.*s: inflight assert worker--: now %u\n",
+                       dlm->name, res->lockname.len, res->lockname.name,
+                       res->inflight_assert_workers);
+}
+
+static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res)
+{
+       spin_lock(&res->spinlock);
+       __dlm_lockres_drop_inflight_worker(dlm, res);
+       spin_unlock(&res->spinlock);
+}
+
 /*
  * lookup a lock resource by name.
  * may already exist in the hashtable.
@@ -1603,7 +1641,8 @@ send_response:
                        mlog(ML_ERROR, "failed to dispatch assert master work\n");
                        response = DLM_MASTER_RESP_ERROR;
                        dlm_lockres_put(res);
-               }
+               } else
+                       dlm_lockres_grab_inflight_worker(dlm, res);
        } else {
                if (res)
                        dlm_lockres_put(res);
@@ -2118,6 +2157,8 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
        dlm_lockres_release_ast(dlm, res);
 
 put:
+       dlm_lockres_drop_inflight_worker(dlm, res);
+
        dlm_lockres_put(res);
 
        mlog(0, "finished with dlm_assert_master_worker\n");
@@ -3088,11 +3129,15 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
                        /* remove it so that only one mle will be found */
                        __dlm_unlink_mle(dlm, tmp);
                        __dlm_mle_detach_hb_events(dlm, tmp);
-                       ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
-                       mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
-                           "telling master to get ref for cleared out mle "
-                           "during migration\n", dlm->name, namelen, name,
-                           master, new_master);
+                       if (tmp->type == DLM_MLE_MASTER) {
+                               ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
+                               mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
+                                               "telling master to get ref "
+                                               "for cleared out mle during "
+                                               "migration\n", dlm->name,
+                                               namelen, name, master,
+                                               new_master);
+                       }
                }
                spin_unlock(&tmp->spinlock);
        }
index 5de019437ea588c51f5dbc833c679ec27f2ee4c2..45067faf5695d54d74af0933c8ea403ec4f1a5b3 100644 (file)
@@ -1708,7 +1708,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
                                mlog_errno(-ENOMEM);
                                /* retry!? */
                                BUG();
-                       }
+                       } else
+                               __dlm_lockres_grab_inflight_worker(dlm, res);
                } else /* put.. incase we are not the master */
                        dlm_lockres_put(res);
                spin_unlock(&res->spinlock);
index 9db869de829d0ebcf35ff598c3ee4b7541934f26..69aac6f088ada71b3dde009f21f2165a7e5b72f5 100644 (file)
@@ -259,12 +259,15 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
                 * refs on it. */
                unused = __dlm_lockres_unused(lockres);
                if (!unused ||
-                   (lockres->state & DLM_LOCK_RES_MIGRATING)) {
+                   (lockres->state & DLM_LOCK_RES_MIGRATING) ||
+                   (lockres->inflight_assert_workers != 0)) {
                        mlog(0, "%s: res %.*s is in use or being remastered, "
-                            "used %d, state %d\n", dlm->name,
-                            lockres->lockname.len, lockres->lockname.name,
-                            !unused, lockres->state);
-                       list_move_tail(&dlm->purge_list, &lockres->purge);
+                            "used %d, state %d, assert master workers %u\n",
+                            dlm->name, lockres->lockname.len,
+                            lockres->lockname.name,
+                            !unused, lockres->state,
+                            lockres->inflight_assert_workers);
+                       list_move_tail(&lockres->purge, &dlm->purge_list);
                        spin_unlock(&lockres->spinlock);
                        continue;
                }
index 5698b52cf5c984c9e2e7bd21e68a76191dcc5abe..2e3c9dbab68c99b368b5e5762737915e0fc27b77 100644 (file)
@@ -191,7 +191,9 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
                                     DLM_UNLOCK_CLEAR_CONVERT_TYPE);
                } else if (status == DLM_RECOVERING ||
                           status == DLM_MIGRATING ||
-                          status == DLM_FORWARD) {
+                          status == DLM_FORWARD ||
+                          status == DLM_NOLOCKMGR
+                          ) {
                        /* must clear the actions because this unlock
                         * is about to be retried.  cannot free or do
                         * any list manipulation. */
@@ -200,7 +202,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
                             res->lockname.name,
                             status==DLM_RECOVERING?"recovering":
                             (status==DLM_MIGRATING?"migrating":
-                             "forward"));
+                               (status == DLM_FORWARD ? "forward" :
+                                               "nolockmanager")));
                        actions = 0;
                }
                if (flags & LKM_CANCEL)
@@ -364,7 +367,10 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
                         * updated state to the recovery master.  this thread
                         * just needs to finish out the operation and call
                         * the unlockast. */
-                       ret = DLM_NORMAL;
+                       if (dlm_is_node_dead(dlm, owner))
+                               ret = DLM_NORMAL;
+                       else
+                               ret = DLM_NOLOCKMGR;
                } else {
                        /* something bad.  this will BUG in ocfs2 */
                        ret = dlm_err_to_dlm_status(tmpret);
@@ -638,7 +644,9 @@ retry:
 
        if (status == DLM_RECOVERING ||
            status == DLM_MIGRATING ||
-           status == DLM_FORWARD) {
+           status == DLM_FORWARD ||
+           status == DLM_NOLOCKMGR) {
+
                /* We want to go away for a tiny bit to allow recovery
                 * / migration to complete on this resource. I don't
                 * know of any wait queue we could sleep on as this
@@ -650,7 +658,7 @@ retry:
                msleep(50);
 
                mlog(0, "retrying unlock due to pending recovery/"
-                    "migration/in-progress\n");
+                    "migration/in-progress/reconnect\n");
                goto retry;
        }
 
index 2060fc398445a259d17167d43b97fed4ba7357ab..8add6f1030d7c0d9afc9aa56c918eb9bc3a10659 100644 (file)
@@ -205,6 +205,21 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
        return inode;
 }
 
+static void ocfs2_cleanup_add_entry_failure(struct ocfs2_super *osb,
+               struct dentry *dentry, struct inode *inode)
+{
+       struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
+
+       ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
+       ocfs2_lock_res_free(&dl->dl_lockres);
+       BUG_ON(dl->dl_count != 1);
+       spin_lock(&dentry_attach_lock);
+       dentry->d_fsdata = NULL;
+       spin_unlock(&dentry_attach_lock);
+       kfree(dl);
+       iput(inode);
+}
+
 static int ocfs2_mknod(struct inode *dir,
                       struct dentry *dentry,
                       umode_t mode,
@@ -231,6 +246,7 @@ static int ocfs2_mknod(struct inode *dir,
        sigset_t oldset;
        int did_block_signals = 0;
        struct posix_acl *default_acl = NULL, *acl = NULL;
+       struct ocfs2_dentry_lock *dl = NULL;
 
        trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
                          (unsigned long long)OCFS2_I(dir)->ip_blkno,
@@ -423,6 +439,8 @@ static int ocfs2_mknod(struct inode *dir,
                goto leave;
        }
 
+       dl = dentry->d_fsdata;
+
        status = ocfs2_add_entry(handle, dentry, inode,
                                 OCFS2_I(inode)->ip_blkno, parent_fe_bh,
                                 &lookup);
@@ -469,6 +487,9 @@ leave:
         * ocfs2_delete_inode will mutex_lock again.
         */
        if ((status < 0) && inode) {
+               if (dl)
+                       ocfs2_cleanup_add_entry_failure(osb, dentry, inode);
+
                OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR;
                clear_nlink(inode);
                iput(inode);
@@ -991,6 +1012,65 @@ leave:
        return status;
 }
 
+static int ocfs2_check_if_ancestor(struct ocfs2_super *osb,
+               u64 src_inode_no, u64 dest_inode_no)
+{
+       int ret = 0, i = 0;
+       u64 parent_inode_no = 0;
+       u64 child_inode_no = src_inode_no;
+       struct inode *child_inode;
+
+#define MAX_LOOKUP_TIMES 32
+       while (1) {
+               child_inode = ocfs2_iget(osb, child_inode_no, 0, 0);
+               if (IS_ERR(child_inode)) {
+                       ret = PTR_ERR(child_inode);
+                       break;
+               }
+
+               ret = ocfs2_inode_lock(child_inode, NULL, 0);
+               if (ret < 0) {
+                       iput(child_inode);
+                       if (ret != -ENOENT)
+                               mlog_errno(ret);
+                       break;
+               }
+
+               ret = ocfs2_lookup_ino_from_name(child_inode, "..", 2,
+                               &parent_inode_no);
+               ocfs2_inode_unlock(child_inode, 0);
+               iput(child_inode);
+               if (ret < 0) {
+                       ret = -ENOENT;
+                       break;
+               }
+
+               if (parent_inode_no == dest_inode_no) {
+                       ret = 1;
+                       break;
+               }
+
+               if (parent_inode_no == osb->root_inode->i_ino) {
+                       ret = 0;
+                       break;
+               }
+
+               child_inode_no = parent_inode_no;
+
+               if (++i >= MAX_LOOKUP_TIMES) {
+                       mlog(ML_NOTICE, "max lookup times reached, filesystem "
+                                       "may have nested directories, "
+                                       "src inode: %llu, dest inode: %llu.\n",
+                                       (unsigned long long)src_inode_no,
+                                       (unsigned long long)dest_inode_no);
+                       ret = 0;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
 /*
  * The only place this should be used is rename!
  * if they have the same id, then the 1st one is the only one locked.
@@ -1002,6 +1082,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
                             struct inode *inode2)
 {
        int status;
+       int inode1_is_ancestor, inode2_is_ancestor;
        struct ocfs2_inode_info *oi1 = OCFS2_I(inode1);
        struct ocfs2_inode_info *oi2 = OCFS2_I(inode2);
        struct buffer_head **tmpbh;
@@ -1015,9 +1096,26 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
        if (*bh2)
                *bh2 = NULL;
 
-       /* we always want to lock the one with the lower lockid first. */
+       /* we always want to lock the one with the lower lockid first.
+        * and if they are nested, we lock ancestor first */
        if (oi1->ip_blkno != oi2->ip_blkno) {
-               if (oi1->ip_blkno < oi2->ip_blkno) {
+               inode1_is_ancestor = ocfs2_check_if_ancestor(osb, oi2->ip_blkno,
+                               oi1->ip_blkno);
+               if (inode1_is_ancestor < 0) {
+                       status = inode1_is_ancestor;
+                       goto bail;
+               }
+
+               inode2_is_ancestor = ocfs2_check_if_ancestor(osb, oi1->ip_blkno,
+                               oi2->ip_blkno);
+               if (inode2_is_ancestor < 0) {
+                       status = inode2_is_ancestor;
+                       goto bail;
+               }
+
+               if ((inode1_is_ancestor == 1) ||
+                               (oi1->ip_blkno < oi2->ip_blkno &&
+                               inode2_is_ancestor == 0)) {
                        /* switch id1 and id2 around */
                        tmpbh = bh2;
                        bh2 = bh1;
@@ -1098,6 +1196,7 @@ static int ocfs2_rename(struct inode *old_dir,
        struct ocfs2_dir_lookup_result old_entry_lookup = { NULL, };
        struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
        struct ocfs2_dir_lookup_result target_insert = { NULL, };
+       bool should_add_orphan = false;
 
        /* At some point it might be nice to break this function up a
         * bit. */
@@ -1134,6 +1233,21 @@ static int ocfs2_rename(struct inode *old_dir,
                        goto bail;
                }
                rename_lock = 1;
+
+               /* at this point we cannot guarantee that the inodes haven't
+                * just been changed, so re-check whether they are nested */
+               status = ocfs2_check_if_ancestor(osb, new_dir->i_ino,
+                               old_inode->i_ino);
+               if (status < 0) {
+                       mlog_errno(status);
+                       goto bail;
+               } else if (status == 1) {
+                       status = -EPERM;
+                       trace_ocfs2_rename_not_permitted(
+                                       (unsigned long long)old_inode->i_ino,
+                                       (unsigned long long)new_dir->i_ino);
+                       goto bail;
+               }
        }
 
        /* if old and new are the same, this'll just do one lock. */
@@ -1304,6 +1418,7 @@ static int ocfs2_rename(struct inode *old_dir,
                                mlog_errno(status);
                                goto bail;
                        }
+                       should_add_orphan = true;
                }
        } else {
                BUG_ON(new_dentry->d_parent->d_inode != new_dir);
@@ -1348,17 +1463,6 @@ static int ocfs2_rename(struct inode *old_dir,
                        goto bail;
                }
 
-               if (S_ISDIR(new_inode->i_mode) ||
-                   (ocfs2_read_links_count(newfe) == 1)) {
-                       status = ocfs2_orphan_add(osb, handle, new_inode,
-                                                 newfe_bh, orphan_name,
-                                                 &orphan_insert, orphan_dir);
-                       if (status < 0) {
-                               mlog_errno(status);
-                               goto bail;
-                       }
-               }
-
                /* change the dirent to point to the correct inode */
                status = ocfs2_update_entry(new_dir, handle, &target_lookup_res,
                                            old_inode);
@@ -1373,6 +1477,15 @@ static int ocfs2_rename(struct inode *old_dir,
                else
                        ocfs2_add_links_count(newfe, -1);
                ocfs2_journal_dirty(handle, newfe_bh);
+               if (should_add_orphan) {
+                       status = ocfs2_orphan_add(osb, handle, new_inode,
+                                       newfe_bh, orphan_name,
+                                       &orphan_insert, orphan_dir);
+                       if (status < 0) {
+                               mlog_errno(status);
+                               goto bail;
+                       }
+               }
        } else {
                /* if the name was not found in new_dir, add it now */
                status = ocfs2_add_entry(handle, new_dentry, old_inode,
@@ -1642,6 +1755,7 @@ static int ocfs2_symlink(struct inode *dir,
        struct ocfs2_dir_lookup_result lookup = { NULL, };
        sigset_t oldset;
        int did_block_signals = 0;
+       struct ocfs2_dentry_lock *dl = NULL;
 
        trace_ocfs2_symlink_begin(dir, dentry, symname,
                                  dentry->d_name.len, dentry->d_name.name);
@@ -1830,6 +1944,8 @@ static int ocfs2_symlink(struct inode *dir,
                goto bail;
        }
 
+       dl = dentry->d_fsdata;
+
        status = ocfs2_add_entry(handle, dentry, inode,
                                 le64_to_cpu(fe->i_blkno), parent_fe_bh,
                                 &lookup);
@@ -1864,6 +1980,9 @@ bail:
        if (xattr_ac)
                ocfs2_free_alloc_context(xattr_ac);
        if ((status < 0) && inode) {
+               if (dl)
+                       ocfs2_cleanup_add_entry_failure(osb, dentry, inode);
+
                OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR;
                clear_nlink(inode);
                iput(inode);
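
The ocfs2 hunks above add ocfs2_check_if_ancestor(), which walks ".." entries upward (bounded by MAX_LOOKUP_TIMES) to decide whether one directory contains the other, and teach ocfs2_double_lock() to take the ancestor's lock first. A minimal user-space sketch of that ordering rule follows; the parent[] table, get_parent_ino(), is_ancestor() and order_locks() are illustrative stand-ins, not ocfs2 APIs.

#include <stdio.h>

#define MAX_LOOKUP_TIMES 32
#define ROOT_INO 2ULL

/* toy "filesystem": parent[ino] is the inode number of ino's parent */
static unsigned long long parent[16] = { [3] = ROOT_INO, [4] = 3, [5] = 4 };

/* models the ".." lookup done by ocfs2_lookup_ino_from_name() */
static unsigned long long get_parent_ino(unsigned long long ino)
{
        return parent[ino];
}

/* returns 1 if @a is an ancestor directory of @b, 0 otherwise */
static int is_ancestor(unsigned long long a, unsigned long long b)
{
        unsigned long long cur = b;
        int i = 0;

        while (cur != ROOT_INO) {
                cur = get_parent_ino(cur);
                if (cur == a)
                        return 1;
                if (++i >= MAX_LOOKUP_TIMES)
                        return 0;       /* give up, as the patch does */
        }
        return 0;
}

/* lock the ancestor first; otherwise fall back to "lower id first" */
static void order_locks(unsigned long long a, unsigned long long b,
                        unsigned long long *first, unsigned long long *second)
{
        if (is_ancestor(a, b) || (a < b && !is_ancestor(b, a))) {
                *first = a;
                *second = b;
        } else {
                *first = b;
                *second = a;
        }
}

int main(void)
{
        unsigned long long first, second;

        order_locks(5, 3, &first, &second);     /* inode 3 contains inode 5 */
        printf("lock %llu then %llu\n", first, second);
        return 0;
}

Taking the ancestor before the descendant is what keeps two concurrent renames between nested directories from deadlocking on each other's cluster locks.
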
index 1b60c62aa9d6fa62940c0a9b8f9889943604170e..6cb019b7c6a83c4ec449baff12652e84b5fca06a 100644 (file)
@@ -2292,6 +2292,8 @@ TRACE_EVENT(ocfs2_rename,
                  __entry->new_len, __get_str(new_name))
 );
 
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_rename_not_permitted);
+
 TRACE_EVENT(ocfs2_rename_target_exists,
        TP_PROTO(int new_len, const char *new_name),
        TP_ARGS(new_len, new_name),
index 714e53b9cc6606a178ee2c8f0121fd12156c0539..636aab69ead559f718a9ebef4e6ca5e8dfad933e 100644 (file)
@@ -4288,9 +4288,16 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
                goto out;
        }
 
+       error = ocfs2_rw_lock(inode, 1);
+       if (error) {
+               mlog_errno(error);
+               goto out;
+       }
+
        error = ocfs2_inode_lock(inode, &old_bh, 1);
        if (error) {
                mlog_errno(error);
+               ocfs2_rw_unlock(inode, 1);
                goto out;
        }
 
@@ -4302,6 +4309,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
        up_write(&OCFS2_I(inode)->ip_xattr_sem);
 
        ocfs2_inode_unlock(inode, 1);
+       ocfs2_rw_unlock(inode, 1);
        brelse(old_bh);
 
        if (error) {
index c7a89cea5c5dc3d0974aa67e5f53372b500c4f67..ddb662b32447ca49cd206c3f4e1394ee558ceeb3 100644 (file)
@@ -1925,15 +1925,11 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
 
        ocfs2_shutdown_local_alloc(osb);
 
+       ocfs2_truncate_log_shutdown(osb);
+
        /* This will disable recovery and flush any recovery work. */
        ocfs2_recovery_exit(osb);
 
-       /*
-        * During dismount, when it recovers another node it will call
-        * ocfs2_recover_orphans and queue delayed work osb_truncate_log_wq.
-        */
-       ocfs2_truncate_log_shutdown(osb);
-
        ocfs2_journal_shutdown(osb);
 
        ocfs2_sync_blockdev(sb);
index 7cf5c996933650295ef37285183173dd2172a8a2..b91dd462ba85802b8bf037bfd55930aea5577686 100644 (file)
 #define IMX6SL_CLK_USDHC4              132
 #define IMX6SL_CLK_PLL4_AUDIO_DIV      133
 #define IMX6SL_CLK_SPBA                        134
-#define IMX6SL_CLK_END                 135
+#define IMX6SL_CLK_ENET                        135
+#define IMX6SL_CLK_END                 136
 
 #endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */
index 0d2c7397e02814b0ab0e0f710d7deeaf72201e4a..d80caa68aebd7434253cfe9b0d9376c0bf837be9 100644 (file)
@@ -10,6 +10,7 @@
 #define CLK_ETH1_PHY           4
 
 /* CLOCKGEN A1 */
+#define CLK_ICN_IF_2           0
 #define CLK_GMAC0_PHY          3
 
 #endif
index 552c779eb6af97bc0abb13f30d155203c281ee1e..f9bdbd13568dde25044d885e307acc89a8031d28 100644 (file)
@@ -10,6 +10,7 @@
 #define CLK_ETH1_PHY           4
 
 /* CLOCKGEN A1 */
+#define CLK_ICN_IF_2           0
 #define CLK_GMAC0_PHY          3
 
 #endif
index 6a45fb583ff1451c1b1e67a3cc7a438a92ffba6c..447775ee2c4b0e51429af5c8e06276ac8a19c65d 100644 (file)
@@ -32,15 +32,24 @@ static inline void touch_nmi_watchdog(void)
 #ifdef arch_trigger_all_cpu_backtrace
 static inline bool trigger_all_cpu_backtrace(void)
 {
-       arch_trigger_all_cpu_backtrace();
+       arch_trigger_all_cpu_backtrace(true);
 
        return true;
 }
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+       arch_trigger_all_cpu_backtrace(false);
+       return true;
+}
 #else
 static inline bool trigger_all_cpu_backtrace(void)
 {
        return false;
 }
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+       return false;
+}
 #endif
 
 #ifdef CONFIG_LOCKUP_DETECTOR
@@ -48,6 +57,7 @@ int hw_nmi_is_cpu_stuck(struct pt_regs *);
 u64 hw_nmi_get_sample_period(int watchdog_thresh);
 extern int watchdog_user_enabled;
 extern int watchdog_thresh;
+extern int sysctl_softlockup_all_cpu_backtrace;
 struct ctl_table;
 extern int proc_dowatchdog(struct ctl_table *, int ,
                           void __user *, size_t *, loff_t *);
index 3c545b48aeabdd177a09920fceda7b550baae54d..8304959ad33641b892f05fb216466b691b871caa 100644 (file)
@@ -360,6 +360,9 @@ static inline void ClearPageCompound(struct page *page)
        ClearPageHead(page);
 }
 #endif
+
+#define PG_head_mask ((1L << PG_head))
+
 #else
 /*
  * Reduce page flag use as much as possible by overlapping
index 0fd06fef9fac9044cf42e6352a9116bfad820847..26b4f2e13275d13246ef8709919cf1ab7e83247a 100644 (file)
 #undef __field_ext
 #define __field_ext(type, item, filter_type)   type    item;
 
+#undef __field_struct
+#define __field_struct(type, item)     type    item;
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)    type    item;
+
 #undef __array
 #define __array(type, item, len)       type    item[len];
 
 #undef __field_ext
 #define __field_ext(type, item, filter_type)
 
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)
+
 #undef __array
 #define __array(type, item, len)
 
@@ -315,9 +327,21 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = {     \
        if (ret)                                                        \
                return ret;
 
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)                    \
+       ret = trace_define_field(event_call, #type, #item,              \
+                                offsetof(typeof(field), item),         \
+                                sizeof(field.item),                    \
+                                0, filter_type);                       \
+       if (ret)                                                        \
+               return ret;
+
 #undef __field
 #define __field(type, item)    __field_ext(type, item, FILTER_OTHER)
 
+#undef __field_struct
+#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)
+
 #undef __array
 #define __array(type, item, len)                                       \
        do {                                                            \
@@ -379,6 +403,12 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call)  \
 #undef __field_ext
 #define __field_ext(type, item, filter_type)
 
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)
+
 #undef __array
 #define __array(type, item, len)
 
@@ -550,6 +580,9 @@ static inline notrace int ftrace_get_offsets_##call(                        \
 #undef __field
 #define __field(type, item)
 
+#undef __field_struct
+#define __field_struct(type, item)
+
 #undef __array
 #define __array(type, item, len)
 
index fed853f3d7aa8a637209bcd72f042a177b754aad..9674145e2f6abf910b88b6be6c7b66b4d56dc152 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/tracepoint.h>
 #include <linux/unistd.h>
 #include <linux/ftrace_event.h>
+#include <linux/thread_info.h>
 
 #include <asm/ptrace.h>
 
@@ -32,4 +33,18 @@ struct syscall_metadata {
        struct ftrace_event_call *exit_event;
 };
 
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
+static inline void syscall_tracepoint_update(struct task_struct *p)
+{
+       if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+               set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+       else
+               clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+}
+#else
+static inline void syscall_tracepoint_update(struct task_struct *p)
+{
+}
+#endif
+
 #endif /* _TRACE_SYSCALL_H */
index d2799d1fc952757270f791901394ed2b9f10062b..6a13c46cd87dbe72bc830bf109256fa458f22ad1 100644 (file)
@@ -1487,7 +1487,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        total_forks++;
        spin_unlock(&current->sighand->siglock);
+       syscall_tracepoint_update(p);
        write_unlock_irq(&tasklist_lock);
+
        proc_fork_connector(p);
        cgroup_post_fork(p);
        if (clone_flags & CLONE_THREAD)
index 6748688813d0684f4357e02150087a6d83d90a7c..369f41a9412481029354034d4c6e0193fe132f56 100644 (file)
@@ -1617,6 +1617,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 #ifdef CONFIG_MEMORY_FAILURE
        VMCOREINFO_NUMBER(PG_hwpoison);
 #endif
+       VMCOREINFO_NUMBER(PG_head_mask);
        VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
 
        arch_crash_save_vmcoreinfo();
index 306f8180b0d53165c960d7844295d8c6a4d6151a..80c33f8de14ffbdb83aaf6be0bc5c31c5d3e6351 100644 (file)
@@ -29,6 +29,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
+static void flush_smp_call_function_queue(bool warn_cpu_offline);
+
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
@@ -51,12 +53,27 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
+               /* Fall-through to the CPU_DEAD[_FROZEN] case. */
 
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                free_cpumask_var(cfd->cpumask);
                free_percpu(cfd->csd);
                break;
+
+       case CPU_DYING:
+       case CPU_DYING_FROZEN:
+               /*
+                * The IPIs for the smp-call-function callbacks queued by other
+                * CPUs might arrive late, either due to hardware latencies or
+                * because this CPU disabled interrupts (inside stop-machine)
+                * before the IPIs were sent. So flush out any pending callbacks
+                * explicitly (without waiting for the IPIs to arrive), to
+                * ensure that the outgoing CPU doesn't go offline with work
+                * still pending.
+                */
+               flush_smp_call_function_queue(false);
+               break;
 #endif
        };
 
@@ -177,23 +194,47 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
        return 0;
 }
 
-/*
- * Invoked by arch to handle an IPI for call function single. Must be
- * called from the arch with interrupts disabled.
+/**
+ * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
+ *
+ * Invoked by arch to handle an IPI for call function single.
+ * Must be called with interrupts disabled.
  */
 void generic_smp_call_function_single_interrupt(void)
 {
+       flush_smp_call_function_queue(true);
+}
+
+/**
+ * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
+ *
+ * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
+ *                   offline CPU. Skip this check if set to 'false'.
+ *
+ * Flush any pending smp-call-function callbacks queued on this CPU. This is
+ * invoked by the generic IPI handler, as well as by a CPU about to go offline,
+ * to ensure that all pending IPI callbacks are run before it goes completely
+ * offline.
+ *
+ * Loop through the call_single_queue and run all the queued callbacks.
+ * Must be called with interrupts disabled.
+ */
+static void flush_smp_call_function_queue(bool warn_cpu_offline)
+{
+       struct llist_head *head;
        struct llist_node *entry;
        struct call_single_data *csd, *csd_next;
        static bool warned;
 
-       entry = llist_del_all(&__get_cpu_var(call_single_queue));
+       WARN_ON(!irqs_disabled());
+
+       head = &__get_cpu_var(call_single_queue);
+       entry = llist_del_all(head);
        entry = llist_reverse_order(entry);
 
-       /*
-        * Shouldn't receive this interrupt on a cpu that is not yet online.
-        */
-       if (unlikely(!cpu_online(smp_processor_id()) && !warned)) {
+       /* There shouldn't be any pending callbacks on an offline CPU. */
+       if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
+                    !warned && !llist_empty(head))) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
 
index 7de6555cfea0f3484e8decbf927c7c456c227175..75b22e22a72c1abd4865bf7c0313af5514970939 100644 (file)
@@ -136,7 +136,6 @@ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
 static int minolduid;
-static int min_percpu_pagelist_fract = 8;
 
 static int ngroups_max = NGROUPS_MAX;
 static const int cap_last_cap = CAP_LAST_CAP;
@@ -861,6 +860,17 @@ static struct ctl_table kern_table[] = {
                .extra1         = &zero,
                .extra2         = &one,
        },
+#ifdef CONFIG_SMP
+       {
+               .procname       = "softlockup_all_cpu_backtrace",
+               .data           = &sysctl_softlockup_all_cpu_backtrace,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one,
+       },
+#endif /* CONFIG_SMP */
        {
                .procname       = "nmi_watchdog",
                .data           = &watchdog_user_enabled,
@@ -1317,7 +1327,7 @@ static struct ctl_table vm_table[] = {
                .maxlen         = sizeof(percpu_pagelist_fraction),
                .mode           = 0644,
                .proc_handler   = percpu_pagelist_fraction_sysctl_handler,
-               .extra1         = &min_percpu_pagelist_fract,
+               .extra1         = &zero,
        },
 #ifdef CONFIG_MMU
        {
index 33cbd8c203f8bbc9a461dd9e5abe9cdbab1f54c3..3490407dc7b7fefc697b8284375ab315c1df02f0 100644 (file)
@@ -492,33 +492,29 @@ static int sys_tracepoint_refcount;
 
 void syscall_regfunc(void)
 {
-       unsigned long flags;
-       struct task_struct *g, *t;
+       struct task_struct *p, *t;
 
        if (!sys_tracepoint_refcount) {
-               read_lock_irqsave(&tasklist_lock, flags);
-               do_each_thread(g, t) {
-                       /* Skip kernel threads. */
-                       if (t->mm)
-                               set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
-               } while_each_thread(g, t);
-               read_unlock_irqrestore(&tasklist_lock, flags);
+               read_lock(&tasklist_lock);
+               for_each_process_thread(p, t) {
+                       set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
+               }
+               read_unlock(&tasklist_lock);
        }
        sys_tracepoint_refcount++;
 }
 
 void syscall_unregfunc(void)
 {
-       unsigned long flags;
-       struct task_struct *g, *t;
+       struct task_struct *p, *t;
 
        sys_tracepoint_refcount--;
        if (!sys_tracepoint_refcount) {
-               read_lock_irqsave(&tasklist_lock, flags);
-               do_each_thread(g, t) {
+               read_lock(&tasklist_lock);
+               for_each_process_thread(p, t) {
                        clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
-               } while_each_thread(g, t);
-               read_unlock_irqrestore(&tasklist_lock, flags);
+               }
+               read_unlock(&tasklist_lock);
        }
 }
 #endif
index 516203e665fcbddc8eb7f55fa7b5a2dd5634cfc5..c3319bd1b0408c1f5822748a4d0b1567c799760d 100644 (file)
 
 int watchdog_user_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
+#ifdef CONFIG_SMP
+int __read_mostly sysctl_softlockup_all_cpu_backtrace;
+#else
+#define sysctl_softlockup_all_cpu_backtrace 0
+#endif
+
 static int __read_mostly watchdog_running;
 static u64 __read_mostly sample_period;
 
@@ -47,6 +53,7 @@ static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
+static unsigned long soft_lockup_nmi_warn;
 
 /* boot commands */
 /*
@@ -95,6 +102,15 @@ static int __init nosoftlockup_setup(char *str)
 }
 __setup("nosoftlockup", nosoftlockup_setup);
 /*  */
+#ifdef CONFIG_SMP
+static int __init softlockup_all_cpu_backtrace_setup(char *str)
+{
+       sysctl_softlockup_all_cpu_backtrace =
+               !!simple_strtol(str, NULL, 0);
+       return 1;
+}
+__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
+#endif
 
 /*
  * Hard-lockup warnings should be triggered after just a few seconds. Soft-
@@ -271,6 +287,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
+       int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
        /* kick the hardlockup detector */
        watchdog_interrupt_count();
@@ -317,6 +334,17 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
                if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;
 
+               if (softlockup_all_cpu_backtrace) {
+                       /* Prevent multiple soft-lockup reports if one CPU is
+                        * already busy dumping backtraces for all CPUs.
+                        */
+                       if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
+                               /* Someone else will report us. Let's give up */
+                               __this_cpu_write(soft_watchdog_warn, true);
+                               return HRTIMER_RESTART;
+                       }
+               }
+
                printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
@@ -327,6 +355,17 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
                else
                        dump_stack();
 
+               if (softlockup_all_cpu_backtrace) {
+                       /* Avoid generating a second backtrace for the current
+                        * CPU, since one has already been dumped above.
+                        */
+                       trigger_allbutself_cpu_backtrace();
+
+                       clear_bit(0, &soft_lockup_nmi_warn);
+                       /* Barrier to sync with other cpus */
+                       smp_mb__after_atomic();
+               }
+
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
@@ -527,10 +566,8 @@ static void update_timers_all_cpus(void)
        int cpu;
 
        get_online_cpus();
-       preempt_disable();
        for_each_online_cpu(cpu)
                update_timers(cpu);
-       preempt_enable();
        put_online_cpus();
 }
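
The softlockup_all_cpu_backtrace path uses test_and_set_bit() on soft_lockup_nmi_warn so only one CPU at a time dumps backtraces for every CPU. A hedged user-space sketch of that gate follows; report_lockup() and dump_everything() are hypothetical helpers standing in for the watchdog code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* stands in for bit 0 of soft_lockup_nmi_warn */
static atomic_flag reporting = ATOMIC_FLAG_INIT;

static void dump_everything(int cpu)
{
        printf("CPU %d dumping backtraces for all CPUs\n", cpu);
}

/* returns true if this caller got to do the reporting */
static bool report_lockup(int cpu)
{
        /* someone else already owns the report: give up quietly */
        if (atomic_flag_test_and_set(&reporting))
                return false;

        dump_everything(cpu);

        /* release the gate so a later lockup can be reported again */
        atomic_flag_clear(&reporting);
        return true;
}

int main(void)
{
        /* pretend CPU 0 is mid-report when CPU 1 also detects a lockup */
        (void)atomic_flag_test_and_set(&reporting);
        printf("cpu1 reported: %d\n", report_lockup(1));        /* 0 */

        atomic_flag_clear(&reporting);
        printf("cpu1 reported: %d\n", report_lockup(1));        /* 1 */
        return 0;
}
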
 
index 7cfcc1b8e1017006f1d1868cc26d63350c2df3b9..7a638aa3545bfa7d409dfa5110bc060acfd381a5 100644 (file)
@@ -930,7 +930,7 @@ config LOCKDEP
        bool
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
        select STACKTRACE
-       select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC
+       select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE
        select KALLSYMS
        select KALLSYMS_ALL
 
@@ -1408,7 +1408,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
        depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
        depends on !X86_64
        select STACKTRACE
-       select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
+       select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE
        help
          Provide stacktrace filter for fault-injection capabilities
 
index df6839e3ce0886a481e8565f8b19d5c71c9b299a..99a03acb7d470570b816ada76f8045d9930c6a27 100644 (file)
@@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
                        len = *ip++;
                        for (; len == 255; length += 255)
                                len = *ip++;
+                       if (unlikely(length > (size_t)(length + len)))
+                               goto _output_error;
                        length += len;
                }
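
The lz4 fix rejects run lengths whose accumulation would wrap a size_t: after an unsigned wraparound the sum is smaller than either operand, so "length > length + len" catches it. A small stand-alone illustration of that test is below; add_len() is a made-up helper, not part of the lz4 code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* accumulate only if length + len does not wrap around size_t */
static bool add_len(size_t *length, size_t len)
{
        if (*length > (size_t)(*length + len))
                return false;           /* would wrap: reject the stream */
        *length += len;
        return true;
}

int main(void)
{
        size_t length = (size_t)-1 - 10;        /* close to SIZE_MAX */

        printf("%d\n", add_len(&length, 5));    /* 1: still fits */
        printf("%d\n", add_len(&length, 100));  /* 0: would overflow */
        return 0;
}
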
 
index 569985d522d518a8992929d5924b6a5062ff9e93..8563081e8da38fb81e0335d2589c9fcebcd81266 100644 (file)
 #include <linux/lzo.h>
 #include "lzodefs.h"
 
-#define HAVE_IP(x)      ((size_t)(ip_end - ip) >= (size_t)(x))
-#define HAVE_OP(x)      ((size_t)(op_end - op) >= (size_t)(x))
-#define NEED_IP(x)      if (!HAVE_IP(x)) goto input_overrun
-#define NEED_OP(x)      if (!HAVE_OP(x)) goto output_overrun
-#define TEST_LB(m_pos)  if ((m_pos) < out) goto lookbehind_overrun
+#define HAVE_IP(t, x)                                  \
+       (((size_t)(ip_end - ip) >= (size_t)(t + x)) &&  \
+        (((t + x) >= t) && ((t + x) >= x)))
+
+#define HAVE_OP(t, x)                                  \
+       (((size_t)(op_end - op) >= (size_t)(t + x)) &&  \
+        (((t + x) >= t) && ((t + x) >= x)))
+
+#define NEED_IP(t, x)                                  \
+       do {                                            \
+               if (!HAVE_IP(t, x))                     \
+                       goto input_overrun;             \
+       } while (0)
+
+#define NEED_OP(t, x)                                  \
+       do {                                            \
+               if (!HAVE_OP(t, x))                     \
+                       goto output_overrun;            \
+       } while (0)
+
+#define TEST_LB(m_pos)                                 \
+       do {                                            \
+               if ((m_pos) < out)                      \
+                       goto lookbehind_overrun;        \
+       } while (0)
 
 int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
                          unsigned char *out, size_t *out_len)
@@ -58,14 +78,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
                                        while (unlikely(*ip == 0)) {
                                                t += 255;
                                                ip++;
-                                               NEED_IP(1);
+                                               NEED_IP(1, 0);
                                        }
                                        t += 15 + *ip++;
                                }
                                t += 3;
 copy_literal_run:
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-                               if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+                               if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
                                        const unsigned char *ie = ip + t;
                                        unsigned char *oe = op + t;
                                        do {
@@ -81,8 +101,8 @@ copy_literal_run:
                                } else
 #endif
                                {
-                                       NEED_OP(t);
-                                       NEED_IP(t + 3);
+                                       NEED_OP(t, 0);
+                                       NEED_IP(t, 3);
                                        do {
                                                *op++ = *ip++;
                                        } while (--t > 0);
@@ -95,7 +115,7 @@ copy_literal_run:
                                m_pos -= t >> 2;
                                m_pos -= *ip++ << 2;
                                TEST_LB(m_pos);
-                               NEED_OP(2);
+                               NEED_OP(2, 0);
                                op[0] = m_pos[0];
                                op[1] = m_pos[1];
                                op += 2;
@@ -119,10 +139,10 @@ copy_literal_run:
                                while (unlikely(*ip == 0)) {
                                        t += 255;
                                        ip++;
-                                       NEED_IP(1);
+                                       NEED_IP(1, 0);
                                }
                                t += 31 + *ip++;
-                               NEED_IP(2);
+                               NEED_IP(2, 0);
                        }
                        m_pos = op - 1;
                        next = get_unaligned_le16(ip);
@@ -137,10 +157,10 @@ copy_literal_run:
                                while (unlikely(*ip == 0)) {
                                        t += 255;
                                        ip++;
-                                       NEED_IP(1);
+                                       NEED_IP(1, 0);
                                }
                                t += 7 + *ip++;
-                               NEED_IP(2);
+                               NEED_IP(2, 0);
                        }
                        next = get_unaligned_le16(ip);
                        ip += 2;
@@ -154,7 +174,7 @@ copy_literal_run:
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
                if (op - m_pos >= 8) {
                        unsigned char *oe = op + t;
-                       if (likely(HAVE_OP(t + 15))) {
+                       if (likely(HAVE_OP(t, 15))) {
                                do {
                                        COPY8(op, m_pos);
                                        op += 8;
@@ -164,7 +184,7 @@ copy_literal_run:
                                        m_pos += 8;
                                } while (op < oe);
                                op = oe;
-                               if (HAVE_IP(6)) {
+                               if (HAVE_IP(6, 0)) {
                                        state = next;
                                        COPY4(op, ip);
                                        op += next;
@@ -172,7 +192,7 @@ copy_literal_run:
                                        continue;
                                }
                        } else {
-                               NEED_OP(t);
+                               NEED_OP(t, 0);
                                do {
                                        *op++ = *m_pos++;
                                } while (op < oe);
@@ -181,7 +201,7 @@ copy_literal_run:
 #endif
                {
                        unsigned char *oe = op + t;
-                       NEED_OP(t);
+                       NEED_OP(t, 0);
                        op[0] = m_pos[0];
                        op[1] = m_pos[1];
                        op += 2;
@@ -194,15 +214,15 @@ match_next:
                state = next;
                t = next;
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-               if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+               if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
                        COPY4(op, ip);
                        op += t;
                        ip += t;
                } else
 #endif
                {
-                       NEED_IP(t + 3);
-                       NEED_OP(t);
+                       NEED_IP(t, 3);
+                       NEED_OP(t, 0);
                        while (t > 0) {
                                *op++ = *ip++;
                                t--;
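
The reworked LZO macros take the pending count and the extra margin as separate arguments so they can check both that t + x does not wrap and that the sum still fits in the remaining buffer. A sketch of the same test as a plain function follows, with illustrative names rather than the HAVE_IP/HAVE_OP macros themselves.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* mirrors the shape of HAVE_IP(t, x): overflow check plus space check */
static bool have_input(const unsigned char *ip, const unsigned char *ip_end,
                       size_t t, size_t x)
{
        size_t need = t + x;

        /* a wrapped sum is smaller than either operand: reject it */
        if (need < t || need < x)
                return false;
        return (size_t)(ip_end - ip) >= need;
}

int main(void)
{
        unsigned char buf[64];

        printf("%d\n", have_input(buf, buf + 64, 10, 15));          /* 1 */
        printf("%d\n", have_input(buf, buf + 64, 60, 15));          /* 0 */
        printf("%d\n", have_input(buf, buf + 64, (size_t)-8, 15));  /* 0 */
        return 0;
}
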
index e60837dc785c4ce35f579374527587c77665ecce..33514d88fef9b041cef11c74717091eec4805f80 100644 (file)
@@ -941,6 +941,37 @@ unlock:
        spin_unlock(ptl);
 }
 
+/*
+ * Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
+ * during copy_user_huge_page()'s copy_page_rep(): in the case when
+ * the source page gets split and a tail freed before copy completes.
+ * Called under pmd_lock of checked pmd, so safe from splitting itself.
+ */
+static void get_user_huge_page(struct page *page)
+{
+       if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
+               struct page *endpage = page + HPAGE_PMD_NR;
+
+               atomic_add(HPAGE_PMD_NR, &page->_count);
+               while (++page < endpage)
+                       get_huge_page_tail(page);
+       } else {
+               get_page(page);
+       }
+}
+
+static void put_user_huge_page(struct page *page)
+{
+       if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
+               struct page *endpage = page + HPAGE_PMD_NR;
+
+               while (page < endpage)
+                       put_page(page++);
+       } else {
+               put_page(page);
+       }
+}
+
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long address,
@@ -1074,7 +1105,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                ret |= VM_FAULT_WRITE;
                goto out_unlock;
        }
-       get_page(page);
+       get_user_huge_page(page);
        spin_unlock(ptl);
 alloc:
        if (transparent_hugepage_enabled(vma) &&
@@ -1095,7 +1126,7 @@ alloc:
                                split_huge_page(page);
                                ret |= VM_FAULT_FALLBACK;
                        }
-                       put_page(page);
+                       put_user_huge_page(page);
                }
                count_vm_event(THP_FAULT_FALLBACK);
                goto out;
@@ -1105,7 +1136,7 @@ alloc:
                put_page(new_page);
                if (page) {
                        split_huge_page(page);
-                       put_page(page);
+                       put_user_huge_page(page);
                } else
                        split_huge_page_pmd(vma, address, pmd);
                ret |= VM_FAULT_FALLBACK;
@@ -1127,7 +1158,7 @@ alloc:
 
        spin_lock(ptl);
        if (page)
-               put_page(page);
+               put_user_huge_page(page);
        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
                spin_unlock(ptl);
                mem_cgroup_uncharge_page(new_page);
@@ -2392,8 +2423,6 @@ static void collapse_huge_page(struct mm_struct *mm,
        pmd = mm_find_pmd(mm, address);
        if (!pmd)
                goto out;
-       if (pmd_trans_huge(*pmd))
-               goto out;
 
        anon_vma_lock_write(vma->anon_vma);
 
@@ -2492,8 +2521,6 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
        pmd = mm_find_pmd(mm, address);
        if (!pmd)
                goto out;
-       if (pmd_trans_huge(*pmd))
-               goto out;
 
        memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -2846,12 +2873,22 @@ void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
 static void split_huge_page_address(struct mm_struct *mm,
                                    unsigned long address)
 {
+       pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
 
        VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
 
-       pmd = mm_find_pmd(mm, address);
-       if (!pmd)
+       pgd = pgd_offset(mm, address);
+       if (!pgd_present(*pgd))
+               return;
+
+       pud = pud_offset(pgd, address);
+       if (!pud_present(*pud))
+               return;
+
+       pmd = pmd_offset(pud, address);
+       if (!pmd_present(*pmd))
                return;
        /*
         * Caller holds the mmap_sem write mode, so a huge pmd cannot
index 226910cb7c9be8ed95439103f2b6f02d1e928376..2024bbd573d2a9ca8a08842cdf0b99d2062cbee1 100644 (file)
@@ -2520,6 +2520,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
                update_mmu_cache(vma, address, ptep);
 }
 
+static int is_hugetlb_entry_migration(pte_t pte)
+{
+       swp_entry_t swp;
+
+       if (huge_pte_none(pte) || pte_present(pte))
+               return 0;
+       swp = pte_to_swp_entry(pte);
+       if (non_swap_entry(swp) && is_migration_entry(swp))
+               return 1;
+       else
+               return 0;
+}
+
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+       swp_entry_t swp;
+
+       if (huge_pte_none(pte) || pte_present(pte))
+               return 0;
+       swp = pte_to_swp_entry(pte);
+       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+               return 1;
+       else
+               return 0;
+}
 
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
@@ -2559,10 +2584,26 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                dst_ptl = huge_pte_lock(h, dst, dst_pte);
                src_ptl = huge_pte_lockptr(h, src, src_pte);
                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
-               if (!huge_pte_none(huge_ptep_get(src_pte))) {
+               entry = huge_ptep_get(src_pte);
+               if (huge_pte_none(entry)) { /* skip none entry */
+                       ;
+               } else if (unlikely(is_hugetlb_entry_migration(entry) ||
+                                   is_hugetlb_entry_hwpoisoned(entry))) {
+                       swp_entry_t swp_entry = pte_to_swp_entry(entry);
+
+                       if (is_write_migration_entry(swp_entry) && cow) {
+                               /*
+                                * COW mappings require pages in both
+                                * parent and child to be set to read.
+                                */
+                               make_migration_entry_read(&swp_entry);
+                               entry = swp_entry_to_pte(swp_entry);
+                               set_huge_pte_at(src, addr, src_pte, entry);
+                       }
+                       set_huge_pte_at(dst, addr, dst_pte, entry);
+               } else {
                        if (cow)
                                huge_ptep_set_wrprotect(src, addr, src_pte);
-                       entry = huge_ptep_get(src_pte);
                        ptepage = pte_page(entry);
                        get_page(ptepage);
                        page_dup_rmap(ptepage);
@@ -2578,32 +2619,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
        return ret;
 }
 
-static int is_hugetlb_entry_migration(pte_t pte)
-{
-       swp_entry_t swp;
-
-       if (huge_pte_none(pte) || pte_present(pte))
-               return 0;
-       swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_migration_entry(swp))
-               return 1;
-       else
-               return 0;
-}
-
-static int is_hugetlb_entry_hwpoisoned(pte_t pte)
-{
-       swp_entry_t swp;
-
-       if (huge_pte_none(pte) || pte_present(pte))
-               return 0;
-       swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
-               return 1;
-       else
-               return 0;
-}
-
 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
                            struct page *ref_page)
index 68710e80994afed815c58b59a1a3cf421df8101f..346ddc9e4c0da44ed0c24b63d49cd40bc3d813de 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -945,7 +945,6 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
        pmd = mm_find_pmd(mm, addr);
        if (!pmd)
                goto out;
-       BUG_ON(pmd_trans_huge(*pmd));
 
        mmun_start = addr;
        mmun_end   = addr + PAGE_SIZE;
index 284974230459728b2ec92f4afdc9e49a4e03ec21..eb58de19f815d07adaa0a0485308dd0095d39423 100644 (file)
@@ -656,19 +656,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
  * @nodes and @flags,) it's isolated and queued to the pagelist which is
  * passed via @private.)
  */
-static struct vm_area_struct *
+static int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
                const nodemask_t *nodes, unsigned long flags, void *private)
 {
-       int err;
-       struct vm_area_struct *first, *vma, *prev;
-
+       int err = 0;
+       struct vm_area_struct *vma, *prev;
 
-       first = find_vma(mm, start);
-       if (!first)
-               return ERR_PTR(-EFAULT);
+       vma = find_vma(mm, start);
+       if (!vma)
+               return -EFAULT;
        prev = NULL;
-       for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
+       for (; vma && vma->vm_start < end; vma = vma->vm_next) {
                unsigned long endvma = vma->vm_end;
 
                if (endvma > end)
@@ -678,9 +677,9 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
                if (!(flags & MPOL_MF_DISCONTIG_OK)) {
                        if (!vma->vm_next && vma->vm_end < end)
-                               return ERR_PTR(-EFAULT);
+                               return -EFAULT;
                        if (prev && prev->vm_end < vma->vm_start)
-                               return ERR_PTR(-EFAULT);
+                               return -EFAULT;
                }
 
                if (flags & MPOL_MF_LAZY) {
@@ -694,15 +693,13 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
                        err = queue_pages_pgd_range(vma, start, endvma, nodes,
                                                flags, private);
-                       if (err) {
-                               first = ERR_PTR(err);
+                       if (err)
                                break;
-                       }
                }
 next:
                prev = vma;
        }
-       return first;
+       return err;
 }
 
 /*
@@ -1156,16 +1153,17 @@ out:
 
 /*
  * Allocate a new page for page migration based on vma policy.
- * Start assuming that page is mapped by vma pointed to by @private.
+ * Start by assuming the page is mapped by the same vma as contains @start.
  * Search forward from there, if not.  N.B., this assumes that the
  * list of pages handed to migrate_pages()--which is how we get here--
  * is in virtual address order.
  */
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
-       struct vm_area_struct *vma = (struct vm_area_struct *)private;
+       struct vm_area_struct *vma;
        unsigned long uninitialized_var(address);
 
+       vma = find_vma(current->mm, start);
        while (vma) {
                address = page_address_in_vma(page, vma);
                if (address != -EFAULT)
@@ -1195,7 +1193,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
        return -ENOSYS;
 }
 
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
        return NULL;
 }
@@ -1205,7 +1203,6 @@ static long do_mbind(unsigned long start, unsigned long len,
                     unsigned short mode, unsigned short mode_flags,
                     nodemask_t *nmask, unsigned long flags)
 {
-       struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        struct mempolicy *new;
        unsigned long end;
@@ -1271,11 +1268,9 @@ static long do_mbind(unsigned long start, unsigned long len,
        if (err)
                goto mpol_out;
 
-       vma = queue_pages_range(mm, start, end, nmask,
+       err = queue_pages_range(mm, start, end, nmask,
                          flags | MPOL_MF_INVERT, &pagelist);
-
-       err = PTR_ERR(vma);     /* maybe ... */
-       if (!IS_ERR(vma))
+       if (!err)
                err = mbind_range(mm, start, end, new);
 
        if (!err) {
@@ -1283,9 +1278,8 @@ static long do_mbind(unsigned long start, unsigned long len,
 
                if (!list_empty(&pagelist)) {
                        WARN_ON_ONCE(flags & MPOL_MF_LAZY);
-                       nr_failed = migrate_pages(&pagelist, new_vma_page,
-                                       NULL, (unsigned long)vma,
-                                       MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+                       nr_failed = migrate_pages(&pagelist, new_page, NULL,
+                               start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
                        if (nr_failed)
                                putback_movable_pages(&pagelist);
                }
index 63f0cd5599997ef8d1a42110c1fc9ccd6d920ac1..9e0beaa918454abbcd63e94ee6cefb5f108f751f 100644 (file)
@@ -120,8 +120,6 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
                pmd = mm_find_pmd(mm, addr);
                if (!pmd)
                        goto out;
-               if (pmd_trans_huge(*pmd))
-                       goto out;
 
                ptep = pte_offset_map(pmd, addr);
 
index b78e3a8f5ee74fca5db7ebaa260ebe2644f224f3..4a852f6c5709dbda2a29561714b704972ec292f6 100644 (file)
@@ -786,7 +786,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
        for (i = 0; i < VMACACHE_SIZE; i++) {
                /* if the vma is cached, invalidate the entire cache */
                if (curr->vmacache[i] == vma) {
-                       vmacache_invalidate(curr->mm);
+                       vmacache_invalidate(mm);
                        break;
                }
        }
index 4f59fa29eda8b9b22e9532f27a3ffc5535893452..20d17f8266fed9e482055c5c68034b8149f3329c 100644 (file)
@@ -69,6 +69,7 @@
 
 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 static DEFINE_MUTEX(pcp_batch_high_lock);
+#define MIN_PERCPU_PAGELIST_FRACTION   (8)
 
 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
 DEFINE_PER_CPU(int, numa_node);
@@ -4145,7 +4146,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
        memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
-static int __meminit zone_batchsize(struct zone *zone)
+static int zone_batchsize(struct zone *zone)
 {
 #ifdef CONFIG_MMU
        int batch;
@@ -4261,8 +4262,8 @@ static void pageset_set_high(struct per_cpu_pageset *p,
        pageset_update(&p->pcp, high, batch);
 }
 
-static void __meminit pageset_set_high_and_batch(struct zone *zone,
-               struct per_cpu_pageset *pcp)
+static void pageset_set_high_and_batch(struct zone *zone,
+                                      struct per_cpu_pageset *pcp)
 {
        if (percpu_pagelist_fraction)
                pageset_set_high(pcp,
@@ -5881,23 +5882,38 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
 {
        struct zone *zone;
-       unsigned int cpu;
+       int old_percpu_pagelist_fraction;
        int ret;
 
+       mutex_lock(&pcp_batch_high_lock);
+       old_percpu_pagelist_fraction = percpu_pagelist_fraction;
+
        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
-       if (!write || (ret < 0))
-               return ret;
+       if (!write || ret < 0)
+               goto out;
+
+       /* Sanity checking to avoid pcp imbalance */
+       if (percpu_pagelist_fraction &&
+           percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
+               percpu_pagelist_fraction = old_percpu_pagelist_fraction;
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* No change? */
+       if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
+               goto out;
 
-       mutex_lock(&pcp_batch_high_lock);
        for_each_populated_zone(zone) {
-               unsigned long  high;
-               high = zone->managed_pages / percpu_pagelist_fraction;
+               unsigned int cpu;
+
                for_each_possible_cpu(cpu)
-                       pageset_set_high(per_cpu_ptr(zone->pageset, cpu),
-                                        high);
+                       pageset_set_high_and_batch(zone,
+                                       per_cpu_ptr(zone->pageset, cpu));
        }
+out:
        mutex_unlock(&pcp_batch_high_lock);
-       return 0;
+       return ret;
 }
 
 int hashdist = HASHDIST_DEFAULT;
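
The percpu_pagelist_fraction handler now holds pcp_batch_high_lock across the whole update, remembers the old value, rejects nonzero values below MIN_PERCPU_PAGELIST_FRACTION with a rollback, and skips the per-zone recompute when nothing changed. A user-space model of that save/validate/rollback flow is sketched below; set_fraction() and apply() are illustrative, not the sysctl API.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

#define MIN_FRACTION 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int fraction;                    /* 0 means "use the default high mark" */

static void apply(int value)
{
        printf("recomputing per-cpu high/batch for fraction %d\n", value);
}

static int set_fraction(int new_value)
{
        int old, ret = 0;

        pthread_mutex_lock(&lock);
        old = fraction;
        fraction = new_value;

        /* sanity check: nonzero values below the minimum are rejected */
        if (fraction && fraction < MIN_FRACTION) {
                fraction = old;          /* roll back, as the patch does */
                ret = -EINVAL;
                goto out;
        }

        if (fraction == old)             /* no change: nothing to recompute */
                goto out;

        apply(fraction);
out:
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        printf("%d\n", set_fraction(4));    /* -EINVAL, old value restored */
        printf("%d\n", set_fraction(16));   /* 0, triggers a recompute */
        printf("%d\n", set_fraction(16));   /* 0, but nothing to redo */
        return 0;
}

Doing the whole read-modify-write under one lock is what closes the window in which two writers could leave the fraction and the per-cpu high marks disagreeing.
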
index bf05fc872ae822cda0b5bd7b6fa473a824870009..b7e94ebbd09e88c3b356e36fe89ed72b89e14474 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -569,6 +569,7 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;
+       pmd_t pmde;
 
        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
@@ -579,7 +580,13 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
                goto out;
 
        pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd))
+       /*
+        * Some THP functions use the sequence pmdp_clear_flush(), set_pmd_at()
+        * without holding anon_vma lock for write.  So when looking for a
+        * genuine pmde (in which to find pte), test present and !THP together.
+        */
+       pmde = ACCESS_ONCE(*pmd);
+       if (!pmd_present(pmde) || pmd_trans_huge(pmde))
                pmd = NULL;
 out:
        return pmd;
@@ -615,9 +622,6 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
        if (!pmd)
                return NULL;
 
-       if (pmd_trans_huge(*pmd))
-               return NULL;
-
        pte = pte_offset_map(pmd, address);
        /* Make a quick check before getting the lock */
        if (!sync && !pte_present(*pte)) {
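
mm_find_pmd() now copies *pmd once with ACCESS_ONCE() into a local pmde and evaluates both the present test and the trans-huge test on that single snapshot, since THP code can rewrite the entry concurrently. A generic user-space sketch of the snapshot-then-test rule follows, using a hypothetical flag word rather than real page-table entries.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define FLAG_PRESENT 0x1
#define FLAG_HUGE    0x2

static _Atomic unsigned long shared_entry = FLAG_PRESENT;

static bool usable_entry(void)
{
        /* one atomic read, the moral equivalent of ACCESS_ONCE(*pmd) */
        unsigned long snap = atomic_load_explicit(&shared_entry,
                                                  memory_order_relaxed);

        /* both predicates see the same value, even if the entry changes now */
        return (snap & FLAG_PRESENT) && !(snap & FLAG_HUGE);
}

int main(void)
{
        printf("%d\n", usable_entry());                 /* 1 */
        atomic_fetch_or(&shared_entry, FLAG_HUGE);      /* concurrent writer */
        printf("%d\n", usable_entry());                 /* 0 */
        return 0;
}
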
index f484c276e994923a5c05577b42d5a9dcc58ae7cc..8f419cff9e3451fa3b4a98026d332d45ae80ea86 100644 (file)
@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
 #define SHORT_SYMLINK_LEN 128
 
 /*
- * shmem_fallocate and shmem_writepage communicate via inode->i_private
- * (with i_mutex making sure that it has only one user at a time):
- * we would prefer not to enlarge the shmem inode just for that.
+ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
+ * inode->i_private (with i_mutex making sure that it has only one user at
+ * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
+       int     mode;           /* FALLOC_FL mode currently operating */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
@@ -759,6 +760,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                        spin_lock(&inode->i_lock);
                        shmem_falloc = inode->i_private;
                        if (shmem_falloc &&
+                           !shmem_falloc->mode &&
                            index >= shmem_falloc->start &&
                            index < shmem_falloc->next)
                                shmem_falloc->nr_unswapped++;
@@ -1233,6 +1235,44 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        int error;
        int ret = VM_FAULT_LOCKED;
 
+       /*
+        * Trinity finds that probing a hole which tmpfs is punching can
+        * prevent the hole-punch from ever completing: which in turn
+        * locks writers out with its hold on i_mutex.  So refrain from
+        * faulting pages into the hole while it's being punched, and
+        * wait on i_mutex to be released if vmf->flags permits.
+        */
+       if (unlikely(inode->i_private)) {
+               struct shmem_falloc *shmem_falloc;
+
+               spin_lock(&inode->i_lock);
+               shmem_falloc = inode->i_private;
+               if (!shmem_falloc ||
+                   shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
+                   vmf->pgoff < shmem_falloc->start ||
+                   vmf->pgoff >= shmem_falloc->next)
+                       shmem_falloc = NULL;
+               spin_unlock(&inode->i_lock);
+               /*
+                * i_lock has protected us from taking shmem_falloc seriously
+                * once return from shmem_fallocate() went back up that stack.
+                * i_lock does not serialize with i_mutex at all, but it does
+                * not matter if sometimes we wait unnecessarily, or sometimes
+                * miss out on waiting: we just need to make those cases rare.
+                */
+               if (shmem_falloc) {
+                       if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+                          !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+                               up_read(&vma->vm_mm->mmap_sem);
+                               mutex_lock(&inode->i_mutex);
+                               mutex_unlock(&inode->i_mutex);
+                               return VM_FAULT_RETRY;
+                       }
+                       /* cond_resched? Leave that to GUP or return to user */
+                       return VM_FAULT_NOPAGE;
+               }
+       }
+
        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
        if (error)
                return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
@@ -1724,20 +1764,31 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
        pgoff_t start, index, end;
        int error;
 
+       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+               return -EOPNOTSUPP;
+
        mutex_lock(&inode->i_mutex);
 
+       shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
+
        if (mode & FALLOC_FL_PUNCH_HOLE) {
                struct address_space *mapping = file->f_mapping;
                loff_t unmap_start = round_up(offset, PAGE_SIZE);
                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
 
+               shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+               shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+               spin_lock(&inode->i_lock);
+               inode->i_private = &shmem_falloc;
+               spin_unlock(&inode->i_lock);
+
                if ((u64)unmap_end > (u64)unmap_start)
                        unmap_mapping_range(mapping, unmap_start,
                                            1 + unmap_end - unmap_start, 0);
                shmem_truncate_range(inode, offset, offset + len - 1);
                /* No need to unmap again: hole-punching leaves COWed pages */
                error = 0;
-               goto out;
+               goto undone;
        }
 
        /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
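
The shmem changes make shmem_fallocate() publish the range it is punching through inode->i_private so shmem_fault() can notice it under i_lock and back off (dropping mmap_sem and waiting on i_mutex) instead of repopulating the hole. A much-condensed user-space model of that handshake is below; falloc_range, punch_begin()/punch_end() and fault_may_proceed() are illustrative names, and the mmap_sem/i_mutex dance is reduced to a boolean.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct falloc_range {
        unsigned long start;
        unsigned long next;             /* one past the last offset */
};

static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
static struct falloc_range *current_punch;      /* models inode->i_private */

static void punch_begin(struct falloc_range *r)
{
        pthread_mutex_lock(&i_lock);
        current_punch = r;
        pthread_mutex_unlock(&i_lock);
}

static void punch_end(void)
{
        pthread_mutex_lock(&i_lock);
        current_punch = NULL;
        pthread_mutex_unlock(&i_lock);
}

/* returns true if the fault may proceed, false if it must back off */
static bool fault_may_proceed(unsigned long pgoff)
{
        bool blocked;

        pthread_mutex_lock(&i_lock);
        blocked = current_punch &&
                  pgoff >= current_punch->start &&
                  pgoff < current_punch->next;
        pthread_mutex_unlock(&i_lock);

        /* the kernel would now drop mmap_sem and wait on i_mutex instead */
        return !blocked;
}

int main(void)
{
        struct falloc_range r = { .start = 10, .next = 20 };

        punch_begin(&r);
        printf("fault at 15: %d\n", fault_may_proceed(15));   /* 0: back off */
        printf("fault at 30: %d\n", fault_may_proceed(30));   /* 1: proceed */
        punch_end();
        return 0;
}
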
index 9ca3b87edabc699a7da3ceea71b61ee34a300e03..3070b929a1bfa67778e415525403e9b36e392344 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -386,6 +386,39 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
+#define OBJECT_FREE (0)
+#define OBJECT_ACTIVE (1)
+
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+
+static void set_obj_status(struct page *page, int idx, int val)
+{
+       int freelist_size;
+       char *status;
+       struct kmem_cache *cachep = page->slab_cache;
+
+       freelist_size = cachep->num * sizeof(freelist_idx_t);
+       status = (char *)page->freelist + freelist_size;
+       status[idx] = val;
+}
+
+static inline unsigned int get_obj_status(struct page *page, int idx)
+{
+       int freelist_size;
+       char *status;
+       struct kmem_cache *cachep = page->slab_cache;
+
+       freelist_size = cachep->num * sizeof(freelist_idx_t);
+       status = (char *)page->freelist + freelist_size;
+
+       return status[idx];
+}
+
+#else
+static inline void set_obj_status(struct page *page, int idx, int val) {}
+
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab or
  * overridden on the command line.
@@ -576,12 +609,30 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
        return cachep->array[smp_processor_id()];
 }
 
+static size_t calculate_freelist_size(int nr_objs, size_t align)
+{
+       size_t freelist_size;
+
+       freelist_size = nr_objs * sizeof(freelist_idx_t);
+       if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
+               freelist_size += nr_objs * sizeof(char);
+
+       if (align)
+               freelist_size = ALIGN(freelist_size, align);
+
+       return freelist_size;
+}
+
 static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
                                size_t idx_size, size_t align)
 {
        int nr_objs;
+       size_t remained_size;
        size_t freelist_size;
+       int extra_space = 0;
 
+       if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
+               extra_space = sizeof(char);
        /*
         * Ignore padding for the initial guess. The padding
         * is at most @align-1 bytes, and @buffer_size is at
@@ -590,14 +641,15 @@ static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
         * into the memory allocation when taking the padding
         * into account.
         */
-       nr_objs = slab_size / (buffer_size + idx_size);
+       nr_objs = slab_size / (buffer_size + idx_size + extra_space);
 
        /*
         * This calculated number will be either the right
         * amount, or one greater than what we want.
         */
-       freelist_size = slab_size - nr_objs * buffer_size;
-       if (freelist_size < ALIGN(nr_objs * idx_size, align))
+       remained_size = slab_size - nr_objs * buffer_size;
+       freelist_size = calculate_freelist_size(nr_objs, align);
+       if (remained_size < freelist_size)
                nr_objs--;
 
        return nr_objs;
@@ -635,7 +687,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
        } else {
                nr_objs = calculate_nr_objs(slab_size, buffer_size,
                                        sizeof(freelist_idx_t), align);
-               mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align);
+               mgmt_size = calculate_freelist_size(nr_objs, align);
        }
        *num = nr_objs;
        *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
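
[Editor's note] With CONFIG_DEBUG_SLAB_LEAK enabled, every object now consumes one freelist index plus one status byte of slab management data, which is why both the object-count guess and the management size above go through calculate_freelist_size(). A rough user-space sketch of the same arithmetic follows; the 4096-byte slab, 64-byte objects, one-byte freelist indices and 8-byte alignment are all illustrative values, not taken from the kernel:

/*
 * Stand-alone model of the nr_objs/freelist_size calculation, with
 * CONFIG_DEBUG_SLAB_LEAK assumed on.
 */
#include <stddef.h>
#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

static size_t freelist_size(int nr_objs, size_t idx_size, size_t align,
                            int leak_debug)
{
        size_t sz = nr_objs * idx_size;

        if (leak_debug)                 /* one status byte per object */
                sz += nr_objs * sizeof(char);
        return align ? ALIGN(sz, align) : sz;
}

int main(void)
{
        size_t slab_size = 4096, buffer_size = 64, idx_size = 1, align = 8;
        int leak_debug = 1;
        size_t extra = leak_debug ? sizeof(char) : 0;

        int nr_objs = (int)(slab_size / (buffer_size + idx_size + extra));
        size_t remained = slab_size - nr_objs * buffer_size;
        size_t mgmt = freelist_size(nr_objs, idx_size, align, leak_debug);

        if (remained < mgmt)            /* initial guess was one too many */
                nr_objs--;

        printf("%d objects, %zu bytes of freelist + status data\n",
               nr_objs, freelist_size(nr_objs, idx_size, align, leak_debug));
        return 0;
}

With those numbers the initial guess of 62 objects already fits: 62 * 64 object bytes plus 128 bytes of indices and status bytes fill the 4096-byte slab exactly.
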
@@ -2041,13 +2093,16 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
                        break;
 
                if (flags & CFLGS_OFF_SLAB) {
+                       size_t freelist_size_per_obj = sizeof(freelist_idx_t);
                        /*
                         * Max number of objs-per-slab for caches which
                         * use off-slab slabs. Needed to avoid a possible
                         * looping condition in cache_grow().
                         */
+                       if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
+                               freelist_size_per_obj += sizeof(char);
                        offslab_limit = size;
-                       offslab_limit /= sizeof(freelist_idx_t);
+                       offslab_limit /= freelist_size_per_obj;
 
                        if (num > offslab_limit)
                                break;
@@ -2294,8 +2349,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
        if (!cachep->num)
                return -E2BIG;
 
-       freelist_size =
-               ALIGN(cachep->num * sizeof(freelist_idx_t), cachep->align);
+       freelist_size = calculate_freelist_size(cachep->num, cachep->align);
 
        /*
         * If the slab has been placed off-slab, and we have enough space then
@@ -2308,7 +2362,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 
        if (flags & CFLGS_OFF_SLAB) {
                /* really off slab. No need for manual alignment */
-               freelist_size = cachep->num * sizeof(freelist_idx_t);
+               freelist_size = calculate_freelist_size(cachep->num, 0);
 
 #ifdef CONFIG_PAGE_POISONING
                /* If we're going to use the generic kernel_map_pages()
@@ -2612,6 +2666,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
                if (cachep->ctor)
                        cachep->ctor(objp);
 #endif
+               set_obj_status(page, i, OBJECT_FREE);
                set_free_obj(page, i, i);
        }
 }
@@ -2820,6 +2875,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
        BUG_ON(objnr >= cachep->num);
        BUG_ON(objp != index_to_obj(cachep, page, objnr));
 
+       set_obj_status(page, objnr, OBJECT_FREE);
        if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
                if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
@@ -2953,6 +3009,8 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                                gfp_t flags, void *objp, unsigned long caller)
 {
+       struct page *page;
+
        if (!objp)
                return objp;
        if (cachep->flags & SLAB_POISON) {
@@ -2983,6 +3041,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                *dbg_redzone1(cachep, objp) = RED_ACTIVE;
                *dbg_redzone2(cachep, objp) = RED_ACTIVE;
        }
+
+       page = virt_to_head_page(objp);
+       set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
        objp += obj_offset(cachep);
        if (cachep->ctor && cachep->flags & SLAB_POISON)
                cachep->ctor(objp);
@@ -4219,21 +4280,12 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c,
                                                struct page *page)
 {
        void *p;
-       int i, j;
+       int i;
 
        if (n[0] == n[1])
                return;
        for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
-               bool active = true;
-
-               for (j = page->active; j < c->num; j++) {
-                       /* Skip freed item */
-                       if (get_free_obj(page, j) == i) {
-                               active = false;
-                               break;
-                       }
-               }
-               if (!active)
+               if (get_obj_status(page, i) != OBJECT_ACTIVE)
                        continue;
 
                if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
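
[Editor's note] The status bytes are stored immediately after the freelist index array in each slab's management area, which is what lets handle_slab() above test a single byte per object instead of rescanning the free list. Below is a stand-alone model of that layout; every name in it is invented for the example and none of it is kernel API:

#include <stdio.h>
#include <stdlib.h>

#define OBJ_FREE   0
#define OBJ_ACTIVE 1

typedef unsigned char freelist_idx;     /* mirrors a small freelist_idx_t */

struct fake_slab {
        int   num;                      /* objects managed by this slab */
        void *mgmt;                     /* freelist indices, then status bytes */
};

static char *status_of(struct fake_slab *s)
{
        /* the status array starts right after num freelist indices */
        return (char *)s->mgmt + s->num * sizeof(freelist_idx);
}

int main(void)
{
        struct fake_slab s = { .num = 8 };
        int i;

        s.mgmt = calloc(s.num, sizeof(freelist_idx) + sizeof(char));
        status_of(&s)[3] = OBJ_ACTIVE;  /* pretend object 3 was allocated */

        for (i = 0; i < s.num; i++)
                printf("obj %d: %s\n", i,
                       status_of(&s)[i] == OBJ_ACTIVE ? "active" : "free");
        free(s.mgmt);
        return 0;
}
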
index 6af373236d7318729528bc288510363071725649..4b0113f73ee9a11b1f50aee09f9a56964f01a3c4 100644 (file)
@@ -56,7 +56,8 @@
  * struct:  This defines the way the data will be stored in the ring buffer.
  *    There are currently two types of elements. __field and __array.
  *    a __field is broken up into (type, name). Where type can be any
- *    type but an array.
+ *    primitive type (integer, long or pointer). __field_struct() can
+ *    be any static complex data value (struct, union, but not an array).
  *    For an array. there are three fields. (type, name, size). The
  *    type of elements in the array, the name of the field and the size
  *    of the array.
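
[Editor's note] The reworded comment distinguishes __field(), which records a primitive value, from __field_struct(), which embeds an entire struct or union in the ring-buffer entry. As a rough illustration only, here is how the two might read in a TRACE_EVENT() definition; the event name foo_bar and struct foo_stats are invented, and this is a fragment rather than a complete trace header (the usual TRACE_SYSTEM and trace/define_trace.h boilerplate is omitted):

#include <linux/tracepoint.h>

TRACE_EVENT(foo_bar,
        TP_PROTO(int cpu, struct foo_stats *stats),
        TP_ARGS(cpu, stats),

        TP_STRUCT__entry(
                __field(int, cpu)                       /* primitive value */
                __field_struct(struct foo_stats, stats) /* whole struct copied */
        ),

        TP_fast_assign(
                __entry->cpu   = cpu;
                __entry->stats = *stats;
        ),

        TP_printk("cpu=%d", __entry->cpu)
);
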
index 010b18ef4ea0cb9e1575b9e54f3922c7bd7f9888..182be0f124074c6d508d8a97e498ea7925665a1f 100755 (executable)
@@ -3476,12 +3476,17 @@ sub process {
                        }
                }
 
-# unnecessary return in a void function? (a single leading tab, then return;)
-               if ($sline =~ /^\+\treturn\s*;\s*$/ &&
-                   $prevline =~ /^\+/) {
+# unnecessary return in a void function
+# at end-of-function, with the previous line a single leading tab, then return;
+# and the line before that not a goto label target like "out:"
+               if ($sline =~ /^[ \+]}\s*$/ &&
+                   $prevline =~ /^\+\treturn\s*;\s*$/ &&
+                   $linenr >= 3 &&
+                   $lines[$linenr - 3] =~ /^[ +]/ &&
+                   $lines[$linenr - 3] !~ /^[ +]\s*$Ident\s*:/) {
                        WARN("RETURN_VOID",
-                            "void function return statements are not generally useful\n" . $herecurr);
-               }
+                            "void function return statements are not generally useful\n" . $hereprev);
+               }
 
 # if statements using unnecessary parentheses - ie: if ((foo == bar))
                if ($^V && $^V ge 5.10.0 &&
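
[Editor's note] The tightened check now fires only when the return; sits immediately before the closing brace and is not the statement attached to a goto label, so label-only returns stop producing false positives. A small compilable sketch of the two cases, using placeholder helpers:

/* setup/do_work/do_cleanup are placeholder helpers for the example. */
static int  setup(void)      { return 0; }
static void do_work(void)    { }
static void do_cleanup(void) { }

/* Still triggers RETURN_VOID: the trailing return; adds nothing. */
static void tidy(void)
{
        do_cleanup();
        return;
}

/* No longer flagged: the return; is the statement the "out:" label needs. */
static void tidy_with_label(void)
{
        if (setup())
                goto out;
        do_work();
out:
        return;
}

int main(void)
{
        tidy();
        tidy_with_label();
        return 0;
}
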
index 51267f4184a6c2a669902c9aa0f18e0ff82e0ba4..2cede239a074dd110aa9ff3a6119b55f9c9d4ff6 100644 (file)
@@ -2,7 +2,7 @@ PROGS := tm-resched-dscr
 
 all: $(PROGS)
 
-$(PROGS):
+$(PROGS): ../harness.c
 
 run_tests: all
        @-for PROG in $(PROGS); do \
index ee98e3886af200e423a05ab134c1c5423f380407..42d4c8caad813f19143b4237c497d9f64015fbb7 100644 (file)
@@ -28,6 +28,8 @@
 #include <assert.h>
 #include <asm/tm.h>
 
+#include "utils.h"
+
 #define TBEGIN          ".long 0x7C00051D ;"
 #define TEND            ".long 0x7C00055D ;"
 #define TCHECK          ".long 0x7C00059C ;"
@@ -36,7 +38,8 @@
 #define SPRN_TEXASR     0x82
 #define SPRN_DSCR       0x03
 
-int main(void) {
+int test_body(void)
+{
        uint64_t rv, dscr1 = 1, dscr2, texasr;
 
        printf("Check DSCR TM context switch: ");
@@ -81,10 +84,15 @@ int main(void) {
                }
                if (dscr2 != dscr1) {
                        printf(" FAIL\n");
-                       exit(EXIT_FAILURE);
+                       return 1;
                } else {
                        printf(" OK\n");
-                       exit(EXIT_SUCCESS);
+                       return 0;
                }
        }
 }
+
+int main(void)
+{
+       return test_harness(test_body, "tm_resched_dscr");
+}
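
[Editor's note] Returning a status from test_body() instead of calling exit() hands pass/fail reporting to the shared powerpc selftest harness pulled in via ../harness.c and utils.h above. As an illustration of that pattern only (not the real harness, which does more work around forking and reporting), a minimal wrapper could look like this:

#include <stdio.h>

static int test_harness(int (*test_fn)(void), const char *name)
{
        int rc;

        printf("test: %s\n", name);
        rc = test_fn();
        printf("%s: %s\n", rc ? "failure" : "success", name);
        return rc;
}

static int demo_body(void)
{
        return 0;               /* 0 = pass, non-zero = fail, as above */
}

int main(void)
{
        return test_harness(demo_body, "demo");
}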